2024-12-16 18:51:36,487 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc
2024-12-16 18:51:36,502 main DEBUG Took 0.012852 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-16 18:51:36,503 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-16 18:51:36,503 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-16 18:51:36,504 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-16 18:51:36,506 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,515 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-16 18:51:36,534 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,535 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,536 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,536 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,537 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,537 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,538 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,539 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,539 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,540 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,541 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,541 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,542 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,542 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,543 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,543 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,544 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,546 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,547 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,548 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,550 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,550 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,551 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,552 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 18:51:36,552 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,552 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-16 18:51:36,554 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 18:51:36,555 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-16 18:51:36,559 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-16 18:51:36,560 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-16 18:51:36,561 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-16 18:51:36,562 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-16 18:51:36,582 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-16 18:51:36,589 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-16 18:51:36,592 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-16 18:51:36,592 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-16 18:51:36,593 main DEBUG createAppenders(={Console})
2024-12-16 18:51:36,594 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc initialized
2024-12-16 18:51:36,594 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc
2024-12-16 18:51:36,595 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc OK.
2024-12-16 18:51:36,595 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-16 18:51:36,596 main DEBUG OutputStream closed
2024-12-16 18:51:36,596 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-16 18:51:36,596 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-16 18:51:36,597 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@53ce1329 OK
2024-12-16 18:51:36,713 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-16 18:51:36,715 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-16 18:51:36,717 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-16 18:51:36,719 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-16 18:51:36,720 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-16 18:51:36,720 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-16 18:51:36,721 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-16 18:51:36,722 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-16 18:51:36,722 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-16 18:51:36,722 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-16 18:51:36,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-16 18:51:36,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-16 18:51:36,724 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-16 18:51:36,724 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-16 18:51:36,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-16 18:51:36,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-16 18:51:36,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-16 18:51:36,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-16 18:51:36,729 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-16 18:51:36,729 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-16 18:51:36,730 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-16 18:51:36,731 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-16T18:51:37,001 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af
2024-12-16 18:51:37,004 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-16 18:51:37,005 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-16T18:51:37,013 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-16T18:51:37,043 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=977, ProcessCount=11, AvailableMemoryMB=4576
2024-12-16T18:51:37,046 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-16T18:51:37,048 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800, deleteOnExit=true
2024-12-16T18:51:37,048 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-16T18:51:37,049 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/test.cache.data in system properties and HBase conf
2024-12-16T18:51:37,050 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.tmp.dir in system properties and HBase conf
2024-12-16T18:51:37,050 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.log.dir in system properties and HBase conf
2024-12-16T18:51:37,051 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-16T18:51:37,052 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-16T18:51:37,052 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-16T18:51:37,160 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-16T18:51:37,263 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-16T18:51:37,268 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:51:37,269 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:51:37,270 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-16T18:51:37,270 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:51:37,271 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-16T18:51:37,272 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-16T18:51:37,273 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:51:37,274 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:51:37,274 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-16T18:51:37,275 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/nfs.dump.dir in system properties and HBase conf
2024-12-16T18:51:37,275 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/java.io.tmpdir in system properties and HBase conf
2024-12-16T18:51:37,276 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:51:37,276 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-16T18:51:37,277 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-16T18:51:37,780 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-16T18:51:38,430 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-16T18:51:38,520 INFO [Time-limited test {}] log.Log(170): Logging initialized @2886ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-16T18:51:38,605 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:51:38,692 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:51:38,721 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:51:38,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:51:38,728 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:51:38,745 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:51:38,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17bd5227{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:51:38,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fabcd45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:51:38,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@805400f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/java.io.tmpdir/jetty-localhost-38907-hadoop-hdfs-3_4_1-tests_jar-_-any-6382929611230766721/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T18:51:38,975 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6567b82c{HTTP/1.1, (http/1.1)}{localhost:38907}
2024-12-16T18:51:38,975 INFO [Time-limited test {}] server.Server(415): Started @3341ms
2024-12-16T18:51:38,999 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-16T18:51:39,467 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:51:39,474 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:51:39,475 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:51:39,475 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:51:39,475 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:51:39,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e40bccb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:51:39,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44404b82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:51:39,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64b46cf2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/java.io.tmpdir/jetty-localhost-45231-hadoop-hdfs-3_4_1-tests_jar-_-any-8576212025713119174/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:51:39,576 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1872bb43{HTTP/1.1, (http/1.1)}{localhost:45231}
2024-12-16T18:51:39,576 INFO [Time-limited test {}] server.Server(415): Started @3942ms
2024-12-16T18:51:39,630 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:51:39,772 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:51:39,779 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:51:39,789 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:51:39,789 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:51:39,789 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:51:39,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22a0043c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:51:39,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f9d2cc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:51:39,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4e5af57{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/java.io.tmpdir/jetty-localhost-45453-hadoop-hdfs-3_4_1-tests_jar-_-any-2420896819751497886/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:51:39,913 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68aaa55{HTTP/1.1, (http/1.1)}{localhost:45453}
2024-12-16T18:51:39,914 INFO [Time-limited test {}] server.Server(415): Started @4280ms
2024-12-16T18:51:39,917 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:51:40,997 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/dfs/data/data4/current/BP-1771638224-172.17.0.2-1734375097902/current, will proceed with Du for space computation calculation,
2024-12-16T18:51:40,997 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/dfs/data/data2/current/BP-1771638224-172.17.0.2-1734375097902/current, will proceed with Du for space computation calculation,
2024-12-16T18:51:40,997 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/dfs/data/data3/current/BP-1771638224-172.17.0.2-1734375097902/current, will proceed with Du for space computation calculation,
2024-12-16T18:51:40,997 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/dfs/data/data1/current/BP-1771638224-172.17.0.2-1734375097902/current, will proceed with Du for space computation calculation,
2024-12-16T18:51:41,044 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:51:41,044 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:51:41,105 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x961d3de64848f0a3 with lease ID 0x392f6e05d20c4f4c: Processing first storage report for DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b from datanode DatanodeRegistration(127.0.0.1:36489, datanodeUuid=1de1932b-ec13-49a0-8d1e-a5962bf3c8e3, infoPort=41133, infoSecurePort=0, ipcPort=37547, storageInfo=lv=-57;cid=testClusterID;nsid=1412994795;c=1734375097902)
2024-12-16T18:51:41,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x961d3de64848f0a3 with lease ID 0x392f6e05d20c4f4c: from storage DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b node DatanodeRegistration(127.0.0.1:36489, datanodeUuid=1de1932b-ec13-49a0-8d1e-a5962bf3c8e3, infoPort=41133, infoSecurePort=0, ipcPort=37547, storageInfo=lv=-57;cid=testClusterID;nsid=1412994795;c=1734375097902), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-16T18:51:41,107 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf97b6649e13671a with lease ID 0x392f6e05d20c4f4b: Processing first storage report for DS-43ee75b6-3091-4436-95e8-a3fc73c4e385 from datanode DatanodeRegistration(127.0.0.1:37003, datanodeUuid=809d6397-4d0c-4e10-bb08-068043b319c4, infoPort=34965, infoSecurePort=0, ipcPort=36487, storageInfo=lv=-57;cid=testClusterID;nsid=1412994795;c=1734375097902)
2024-12-16T18:51:41,108 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf97b6649e13671a with lease ID 0x392f6e05d20c4f4b: from storage DS-43ee75b6-3091-4436-95e8-a3fc73c4e385 node DatanodeRegistration(127.0.0.1:37003, datanodeUuid=809d6397-4d0c-4e10-bb08-068043b319c4, infoPort=34965, infoSecurePort=0, ipcPort=36487, storageInfo=lv=-57;cid=testClusterID;nsid=1412994795;c=1734375097902), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-16T18:51:41,108 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x961d3de64848f0a3 with lease ID 0x392f6e05d20c4f4c: Processing first storage report for DS-e1ddff21-2f05-4d96-ac46-ee6053beb3a1 from datanode DatanodeRegistration(127.0.0.1:36489, datanodeUuid=1de1932b-ec13-49a0-8d1e-a5962bf3c8e3, infoPort=41133, infoSecurePort=0, ipcPort=37547, storageInfo=lv=-57;cid=testClusterID;nsid=1412994795;c=1734375097902)
2024-12-16T18:51:41,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x961d3de64848f0a3 with lease ID 0x392f6e05d20c4f4c: from storage DS-e1ddff21-2f05-4d96-ac46-ee6053beb3a1 node DatanodeRegistration(127.0.0.1:36489, datanodeUuid=1de1932b-ec13-49a0-8d1e-a5962bf3c8e3, infoPort=41133, infoSecurePort=0, ipcPort=37547, storageInfo=lv=-57;cid=testClusterID;nsid=1412994795;c=1734375097902), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:51:41,109 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf97b6649e13671a with lease ID 0x392f6e05d20c4f4b: Processing first storage report for DS-abd27046-b22a-4932-afa7-4f383198e5b2 from datanode DatanodeRegistration(127.0.0.1:37003, datanodeUuid=809d6397-4d0c-4e10-bb08-068043b319c4, infoPort=34965, infoSecurePort=0, ipcPort=36487, storageInfo=lv=-57;cid=testClusterID;nsid=1412994795;c=1734375097902)
2024-12-16T18:51:41,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf97b6649e13671a with lease ID 0x392f6e05d20c4f4b: from storage DS-abd27046-b22a-4932-afa7-4f383198e5b2 node DatanodeRegistration(127.0.0.1:37003, datanodeUuid=809d6397-4d0c-4e10-bb08-068043b319c4, infoPort=34965, infoSecurePort=0, ipcPort=36487, storageInfo=lv=-57;cid=testClusterID;nsid=1412994795;c=1734375097902), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:51:41,186 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af
2024-12-16T18:51:41,270 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/zookeeper_0, clientPort=61737, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-16T18:51:41,284 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=61737
2024-12-16T18:51:41,297 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:51:41,299 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:51:41,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:51:41,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:51:41,984 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc with version=8
2024-12-16T18:51:41,985 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/hbase-staging
2024-12-16T18:51:42,113 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-16T18:51:42,333 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:51:42,354 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:51:42,355 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:51:42,355 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:51:42,355 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:51:42,355 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:51:42,475 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:51:42,534 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-16T18:51:42,543 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-16T18:51:42,547 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:51:42,579 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 3114 (auto-detected)
2024-12-16T18:51:42,580 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-16T18:51:42,602 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35675
2024-12-16T18:51:42,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:51:42,615 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:51:42,631 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35675 connecting to ZooKeeper ensemble=127.0.0.1:61737
2024-12-16T18:51:42,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:356750x0, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:51:42,698 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35675-0x100e451d1ff0000 connected
2024-12-16T18:51:42,787 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:51:42,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:51:42,795 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:51:42,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35675
2024-12-16T18:51:42,801 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35675
2024-12-16T18:51:42,801 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35675
2024-12-16T18:51:42,802 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35675
2024-12-16T18:51:42,803 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35675
2024-12-16T18:51:42,810 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc, hbase.cluster.distributed=false
2024-12-16T18:51:42,891 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:51:42,892 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:51:42,892 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:51:42,892 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:51:42,892 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:51:42,892 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:51:42,895 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:51:42,897 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:51:42,898 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37043
2024-12-16T18:51:42,900 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T18:51:42,906 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-16T18:51:42,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:51:42,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:51:42,920 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:37043 connecting to ZooKeeper ensemble=127.0.0.1:61737
2024-12-16T18:51:42,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370430x0, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:51:42,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:370430x0, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:51:42,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37043-0x100e451d1ff0001 connected
2024-12-16T18:51:42,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:51:42,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:51:42,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37043
2024-12-16T18:51:42,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37043
2024-12-16T18:51:42,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37043
2024-12-16T18:51:42,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37043
2024-12-16T18:51:42,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37043
2024-12-16T18:51:42,992 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7a6e4bf70377,35675,1734375102108
2024-12-16T18:51:43,008 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a6e4bf70377:35675
2024-12-16T18:51:43,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:51:43,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:51:43,031 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a6e4bf70377,35675,1734375102108
2024-12-16T18:51:43,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:51:43,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:51:43,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:43,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:43,127 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:51:43,128 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a6e4bf70377,35675,1734375102108 from backup master directory
2024-12-16T18:51:43,128 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:51:43,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a6e4bf70377,35675,1734375102108
2024-12-16T18:51:43,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:51:43,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:51:43,177 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-16T18:51:43,177 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a6e4bf70377,35675,1734375102108
2024-12-16T18:51:43,179 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-16T18:51:43,181 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-16T18:51:43,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:51:43,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:51:43,253 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/hbase.id with ID: 8661d6ee-ea91-48cf-ba1c-ac018a279067
2024-12-16T18:51:43,297 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:51:43,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:43,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:51:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:51:43,382 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-16T18:51:43,384 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-16T18:51:43,389 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:51:43,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:51:43,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:51:43,435 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store
2024-12-16T18:51:43,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:51:43,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:51:43,457 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-16T18:51:43,458 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:51:43,459 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:51:43,459 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:51:43,459 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:51:43,459 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:51:43,459 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:51:43,460 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:51:43,460 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:51:43,462 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/.initializing
2024-12-16T18:51:43,462 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/WALs/7a6e4bf70377,35675,1734375102108
2024-12-16T18:51:43,476 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C35675%2C1734375102108, suffix=, logDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/WALs/7a6e4bf70377,35675,1734375102108, archiveDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/oldWALs, maxLogs=10
2024-12-16T18:51:43,484 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C35675%2C1734375102108.1734375103482
2024-12-16T18:51:43,484 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag.
2024-12-16T18:51:43,485 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag.
2024-12-16T18:51:43,502 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/WALs/7a6e4bf70377,35675,1734375102108/7a6e4bf70377%2C35675%2C1734375102108.1734375103482
2024-12-16T18:51:43,510 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41133:41133),(127.0.0.1/127.0.0.1:34965:34965)]
2024-12-16T18:51:43,511 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:51:43,511 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:51:43,514 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,515 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-16T18:51:43,584 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:43,587 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:51:43,587 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-16T18:51:43,592 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:43,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:51:43,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-16T18:51:43,597 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:43,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:51:43,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-16T18:51:43,602 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:43,604 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:51:43,608 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,609 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,619 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-16T18:51:43,623 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:51:43,628 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:51:43,629 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734842, jitterRate=-0.06560066342353821}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-16T18:51:43,633 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:51:43,634 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-16T18:51:43,662 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7243cbd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:51:43,694 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-16T18:51:43,705 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-16T18:51:43,705 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-16T18:51:43,707 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-16T18:51:43,708 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec
2024-12-16T18:51:43,712 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec
2024-12-16T18:51:43,713 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-16T18:51:43,736 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-16T18:51:43,749 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-16T18:51:43,804 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-16T18:51:43,806 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-16T18:51:43,808 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-16T18:51:43,843 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-16T18:51:43,845 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-16T18:51:43,849 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-16T18:51:43,875 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-16T18:51:43,877 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-16T18:51:43,912 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-16T18:51:43,924 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-16T18:51:43,969 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-16T18:51:44,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:51:44,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:51:44,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:44,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:44,010 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7a6e4bf70377,35675,1734375102108, sessionid=0x100e451d1ff0000, setting cluster-up flag (Was=false)
2024-12-16T18:51:44,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:44,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:44,184 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-16T18:51:44,186 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,35675,1734375102108
2024-12-16T18:51:44,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:44,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:44,226 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-16T18:51:44,229 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,35675,1734375102108
2024-12-16T18:51:44,294 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a6e4bf70377:37043
2024-12-16T18:51:44,295 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1008): ClusterId : 8661d6ee-ea91-48cf-ba1c-ac018a279067
2024-12-16T18:51:44,299 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-16T18:51:44,311 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-16T18:51:44,312 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-16T18:51:44,317 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-16T18:51:44,319 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-16T18:51:44,319 DEBUG [RS:0;7a6e4bf70377:37043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fa6ca95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:51:44,325 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-16T18:51:44,326 DEBUG [RS:0;7a6e4bf70377:37043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2db99a0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:51:44,329 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-16T18:51:44,330 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-16T18:51:44,330 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-16T18:51:44,330 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-16T18:51:44,333 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,35675,1734375102108 with isa=7a6e4bf70377/172.17.0.2:37043, startcode=1734375102890
2024-12-16T18:51:44,336 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a6e4bf70377,35675,1734375102108 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-16T18:51:44,340 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:51:44,340 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:51:44,340 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:51:44,341 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:51:44,341 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a6e4bf70377:0, corePoolSize=10, maxPoolSize=10
2024-12-16T18:51:44,341 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,341 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:51:44,341 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,350 DEBUG [RS:0;7a6e4bf70377:37043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:51:44,373 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:51:44,374 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-16T18:51:44,382 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:44,383 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:51:44,395 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734375134395
2024-12-16T18:51:44,397 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-16T18:51:44,403 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-16T18:51:44,407 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-16T18:51:44,407 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-16T18:51:44,408 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-16T18:51:44,408 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-16T18:51:44,411 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,418 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-16T18:51:44,421 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-16T18:51:44,421 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-16T18:51:44,428 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-16T18:51:44,429 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-16T18:51:44,431 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375104431,5,FailOnTimeoutGroup]
2024-12-16T18:51:44,446 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375104431,5,FailOnTimeoutGroup]
2024-12-16T18:51:44,446 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,446 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-16T18:51:44,448 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,448 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:51:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:51:44,459 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-16T18:51:44,459 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc
2024-12-16T18:51:44,487 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42875, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:51:44,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:51:44,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:51:44,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:51:44,496 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35675 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:44,500 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35675 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:44,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:51:44,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:51:44,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:44,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:51:44,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:51:44,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:51:44,517 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:44,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:51:44,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:51:44,522 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc
2024-12-16T18:51:44,522 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33663
2024-12-16T18:51:44,522 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-16T18:51:44,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:51:44,523 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:44,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:51:44,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740
2024-12-16T18:51:44,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740
2024-12-16T18:51:44,533 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T18:51:44,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:51:44,534 DEBUG [RS:0;7a6e4bf70377:37043 {}] zookeeper.ZKUtil(111): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:44,535 WARN [RS:0;7a6e4bf70377:37043 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-16T18:51:44,535 INFO [RS:0;7a6e4bf70377:37043 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:51:44,535 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:44,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:51:44,538 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,37043,1734375102890]
2024-12-16T18:51:44,543 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:51:44,544 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843773, jitterRate=0.0729137659072876}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:51:44,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:51:44,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:51:44,547 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:51:44,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:51:44,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:51:44,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:51:44,549 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:51:44,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:51:44,553 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:51:44,553 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-16T18:51:44,553 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-16T18:51:44,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-16T18:51:44,569 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-16T18:51:44,572 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-16T18:51:44,574 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-16T18:51:44,585 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-16T18:51:44,589 INFO [RS:0;7a6e4bf70377:37043 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-16T18:51:44,589 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,590 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-16T18:51:44,601 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,601 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,602 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,602 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,602 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,603 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,603 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:51:44,603 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,603 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,603 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,604 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,604 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:51:44,604 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:51:44,604 DEBUG [RS:0;7a6e4bf70377:37043 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:51:44,611 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,611 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,611 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,611 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,611 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,37043,1734375102890-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T18:51:44,639 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-16T18:51:44,642 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,37043,1734375102890-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:44,670 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.Replication(204): 7a6e4bf70377,37043,1734375102890 started
2024-12-16T18:51:44,670 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,37043,1734375102890, RpcServer on 7a6e4bf70377/172.17.0.2:37043, sessionid=0x100e451d1ff0001
2024-12-16T18:51:44,671 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-16T18:51:44,671 DEBUG [RS:0;7a6e4bf70377:37043 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:44,671 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,37043,1734375102890'
2024-12-16T18:51:44,672 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-16T18:51:44,673 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-16T18:51:44,673 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-16T18:51:44,673 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-16T18:51:44,674 DEBUG [RS:0;7a6e4bf70377:37043 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:44,674 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,37043,1734375102890'
2024-12-16T18:51:44,674 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-16T18:51:44,675 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-16T18:51:44,676 DEBUG [RS:0;7a6e4bf70377:37043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-16T18:51:44,676 INFO [RS:0;7a6e4bf70377:37043 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-16T18:51:44,677 INFO [RS:0;7a6e4bf70377:37043 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-16T18:51:44,725 WARN [7a6e4bf70377:35675 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-16T18:51:44,786 INFO [RS:0;7a6e4bf70377:37043 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C37043%2C1734375102890, suffix=, logDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890, archiveDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs, maxLogs=32
2024-12-16T18:51:44,817 INFO [RS:0;7a6e4bf70377:37043 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375104817
2024-12-16T18:51:44,844 INFO [RS:0;7a6e4bf70377:37043 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375104817
2024-12-16T18:51:44,844 DEBUG [RS:0;7a6e4bf70377:37043 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:41133:41133)]
2024-12-16T18:51:44,978 DEBUG [7a6e4bf70377:35675 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-16T18:51:44,987 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:44,991 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,37043,1734375102890, state=OPENING
2024-12-16T18:51:45,042 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-16T18:51:45,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:45,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:45,052 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:51:45,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:51:45,057 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7a6e4bf70377,37043,1734375102890}]
2024-12-16T18:51:45,230 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:45,231 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-16T18:51:45,235 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52312, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-16T18:51:45,251 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-16T18:51:45,252 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:51:45,260 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C37043%2C1734375102890.meta, suffix=.meta, logDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890, archiveDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs, maxLogs=32
2024-12-16T18:51:45,263 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.meta.1734375105263.meta
2024-12-16T18:51:45,282 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.meta.1734375105263.meta
2024-12-16T18:51:45,282 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:41133:41133)]
2024-12-16T18:51:45,286 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:51:45,288 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-16T18:51:45,350 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-16T18:51:45,356 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-16T18:51:45,361 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-16T18:51:45,361 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:51:45,362 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-16T18:51:45,362 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-16T18:51:45,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:51:45,368 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:51:45,368 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:45,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:51:45,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:51:45,372 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:51:45,372 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:45,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:51:45,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:51:45,376 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:51:45,377 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:45,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:51:45,381 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740
2024-12-16T18:51:45,386 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740
2024-12-16T18:51:45,391 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T18:51:45,395 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:51:45,397 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810673, jitterRate=0.03082461655139923}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:51:45,399 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:51:45,407 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734375105225
2024-12-16T18:51:45,419 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-16T18:51:45,420 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-16T18:51:45,420 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:45,422 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,37043,1734375102890, state=OPEN
2024-12-16T18:51:45,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:51:45,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:51:45,463 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:51:45,463 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:51:45,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-16T18:51:45,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7a6e4bf70377,37043,1734375102890 in 407 msec
2024-12-16T18:51:45,473 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-16T18:51:45,473 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 908 msec
2024-12-16T18:51:45,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2070 sec
2024-12-16T18:51:45,480 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734375105480, completionTime=-1
2024-12-16T18:51:45,480 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-16T18:51:45,480 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-16T18:51:45,519 DEBUG [hconnection-0x2a740e5f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:51:45,521 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:51:45,532 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-16T18:51:45,532 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734375165532
2024-12-16T18:51:45,532 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734375225532
2024-12-16T18:51:45,532 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 52 msec
2024-12-16T18:51:45,570 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35675,1734375102108-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:45,571 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35675,1734375102108-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:45,571 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35675,1734375102108-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:45,572 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a6e4bf70377:35675, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:45,572 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:45,578 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-16T18:51:45,583 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-16T18:51:45,585 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:51:45,594 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-16T18:51:45,598 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-16T18:51:45,599 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:45,602 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-16T18:51:45,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:51:45,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:51:45,628 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 737b252b3c17601ff08eca64ce3fd6e7, NAME => 'hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc
2024-12-16T18:51:45,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:51:45,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:51:45,651 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:51:45,652 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 737b252b3c17601ff08eca64ce3fd6e7, disabling compactions & flushes
2024-12-16T18:51:45,652 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:51:45,652 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:51:45,652 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7. after waiting 0 ms
2024-12-16T18:51:45,652 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:51:45,652 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:51:45,653 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 737b252b3c17601ff08eca64ce3fd6e7:
2024-12-16T18:51:45,655 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-16T18:51:45,660 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734375105656"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375105656"}]},"ts":"1734375105656"}
2024-12-16T18:51:45,685 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-16T18:51:45,688 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-16T18:51:45,693 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375105689"}]},"ts":"1734375105689"}
2024-12-16T18:51:45,701 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-16T18:51:45,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=737b252b3c17601ff08eca64ce3fd6e7, ASSIGN}]
2024-12-16T18:51:45,733 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=737b252b3c17601ff08eca64ce3fd6e7, ASSIGN
2024-12-16T18:51:45,736 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=737b252b3c17601ff08eca64ce3fd6e7, ASSIGN; state=OFFLINE, location=7a6e4bf70377,37043,1734375102890; forceNewPlan=false, retain=false
2024-12-16T18:51:45,886 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=737b252b3c17601ff08eca64ce3fd6e7, regionState=OPENING, regionLocation=7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:45,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 737b252b3c17601ff08eca64ce3fd6e7, server=7a6e4bf70377,37043,1734375102890}]
2024-12-16T18:51:46,049 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:46,058 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:51:46,059 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 737b252b3c17601ff08eca64ce3fd6e7, NAME => 'hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:51:46,059 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:51:46,059 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:51:46,060 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:51:46,060 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:51:46,068 INFO [StoreOpener-737b252b3c17601ff08eca64ce3fd6e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:51:46,072 INFO [StoreOpener-737b252b3c17601ff08eca64ce3fd6e7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 737b252b3c17601ff08eca64ce3fd6e7 columnFamilyName info
2024-12-16T18:51:46,072 DEBUG [StoreOpener-737b252b3c17601ff08eca64ce3fd6e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:46,074 INFO [StoreOpener-737b252b3c17601ff08eca64ce3fd6e7-1 {}] regionserver.HStore(327): Store=737b252b3c17601ff08eca64ce3fd6e7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:51:46,076 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:51:46,077 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:51:46,082 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:51:46,086 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:51:46,087 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 737b252b3c17601ff08eca64ce3fd6e7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781960, jitterRate=-0.005687236785888672}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-16T18:51:46,089 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 737b252b3c17601ff08eca64ce3fd6e7:
2024-12-16T18:51:46,091 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7., pid=6, masterSystemTime=1734375106049
2024-12-16T18:51:46,096 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:51:46,096 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:51:46,097 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=737b252b3c17601ff08eca64ce3fd6e7, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:46,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-16T18:51:46,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 737b252b3c17601ff08eca64ce3fd6e7, server=7a6e4bf70377,37043,1734375102890 in 209 msec
2024-12-16T18:51:46,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-16T18:51:46,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=737b252b3c17601ff08eca64ce3fd6e7, ASSIGN in 378 msec
2024-12-16T18:51:46,114 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-16T18:51:46,115 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375106114"}]},"ts":"1734375106114"}
2024-12-16T18:51:46,140 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-16T18:51:46,192 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-16T18:51:46,199 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-16T18:51:46,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 606 msec
2024-12-16T18:51:46,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:51:46,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:46,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:51:46,281 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-16T18:51:46,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:51:46,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 80 msec
2024-12-16T18:51:46,361 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-16T18:51:46,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:51:46,414 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 51 msec
2024-12-16T18:51:46,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-16T18:51:46,485 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.306sec
2024-12-16T18:51:46,487 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-16T18:51:46,489 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-16T18:51:46,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-16T18:51:46,502 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-16T18:51:46,503 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-16T18:51:46,503 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-16T18:51:46,505 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35675,1734375102108-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T18:51:46,505 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35675,1734375102108-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-16T18:51:46,527 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1bfed419 to 127.0.0.1:61737 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6af8af4e
2024-12-16T18:51:46,534 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-16T18:51:46,536 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-16T18:51:46,536 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35675,1734375102108-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:51:46,550 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry
2024-12-16T18:51:46,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7650ec16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:51:46,580 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-12-16T18:51:46,580 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-12-16T18:51:46,614 DEBUG [hconnection-0x4f6c544d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:51:46,627 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:51:46,675 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7a6e4bf70377,35675,1734375102108
2024-12-16T18:51:46,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:51:46,692 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-16T18:51:46,712 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-16T18:51:46,749 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33826, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-16T18:51:46,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35675 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-16T18:51:46,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35675 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-16T18:51:46,769 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35675 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-16T18:51:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35675 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling
2024-12-16T18:51:46,784 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-16T18:51:46,784 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:46,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35675 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 9
2024-12-16T18:51:46,787 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-16T18:51:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35675 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-16T18:51:46,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741837_1013 (size=389)
2024-12-16T18:51:46,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741837_1013 (size=389)
2024-12-16T18:51:47,258 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 20372a9c124cb3132bd555bb6aecb7c7, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc
2024-12-16T18:51:47,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741838_1014 (size=72)
2024-12-16T18:51:47,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741838_1014 (size=72)
2024-12-16T18:51:47,334 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:51:47,334 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 20372a9c124cb3132bd555bb6aecb7c7, disabling compactions & flushes
2024-12-16T18:51:47,335 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:51:47,335 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:51:47,335 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7. after waiting 0 ms
2024-12-16T18:51:47,335 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:51:47,335 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:51:47,335 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 20372a9c124cb3132bd555bb6aecb7c7:
2024-12-16T18:51:47,337 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-16T18:51:47,338 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1734375107338"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375107338"}]},"ts":"1734375107338"}
2024-12-16T18:51:47,343 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-16T18:51:47,345 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-16T18:51:47,346 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375107346"}]},"ts":"1734375107346"}
2024-12-16T18:51:47,350 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta
2024-12-16T18:51:47,409 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20372a9c124cb3132bd555bb6aecb7c7, ASSIGN}]
2024-12-16T18:51:47,414 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20372a9c124cb3132bd555bb6aecb7c7, ASSIGN
2024-12-16T18:51:47,416 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20372a9c124cb3132bd555bb6aecb7c7, ASSIGN; state=OFFLINE, location=7a6e4bf70377,37043,1734375102890; forceNewPlan=false, retain=false
2024-12-16T18:51:47,568 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=20372a9c124cb3132bd555bb6aecb7c7, regionState=OPENING, regionLocation=7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:47,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 20372a9c124cb3132bd555bb6aecb7c7, server=7a6e4bf70377,37043,1734375102890}]
2024-12-16T18:51:47,754 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:47,776 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:51:47,777 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 20372a9c124cb3132bd555bb6aecb7c7, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:51:47,777 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:51:47,778 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:51:47,778 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:51:47,778 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:51:47,782 INFO [StoreOpener-20372a9c124cb3132bd555bb6aecb7c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:51:47,788 INFO [StoreOpener-20372a9c124cb3132bd555bb6aecb7c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20372a9c124cb3132bd555bb6aecb7c7 columnFamilyName info
2024-12-16T18:51:47,789 DEBUG [StoreOpener-20372a9c124cb3132bd555bb6aecb7c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:51:47,791 INFO [StoreOpener-20372a9c124cb3132bd555bb6aecb7c7-1 {}] regionserver.HStore(327): Store=20372a9c124cb3132bd555bb6aecb7c7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:51:47,793 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:51:47,795 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:51:47,801 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:51:47,807 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:51:47,808 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 20372a9c124cb3132bd555bb6aecb7c7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699889, jitterRate=-0.11004573106765747}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-16T18:51:47,810 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 20372a9c124cb3132bd555bb6aecb7c7:
2024-12-16T18:51:47,812 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7., pid=11, masterSystemTime=1734375107754
2024-12-16T18:51:47,818 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=20372a9c124cb3132bd555bb6aecb7c7, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,37043,1734375102890
2024-12-16T18:51:47,819 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:51:47,819 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:51:47,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-12-16T18:51:47,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 20372a9c124cb3132bd555bb6aecb7c7, server=7a6e4bf70377,37043,1734375102890 in 235 msec
2024-12-16T18:51:47,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-12-16T18:51:47,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20372a9c124cb3132bd555bb6aecb7c7, ASSIGN in 424 msec
2024-12-16T18:51:47,841 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-16T18:51:47,841 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375107841"}]},"ts":"1734375107841"}
2024-12-16T18:51:47,847 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta
2024-12-16T18:51:47,923 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-16T18:51:47,933 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 1.1530 sec
2024-12-16T18:51:50,870 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-16T18:51:50,913 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-16T18:51:50,914 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-16T18:51:50,915 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling'
2024-12-16T18:51:52,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-16T18:51:52,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-16T18:51:52,533 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling
2024-12-16T18:51:52,533 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-16T18:51:52,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-16T18:51:52,535 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-12-16T18:51:52,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-16T18:51:52,537 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-16T18:51:52,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-16T18:51:52,537 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-16T18:51:56,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35675 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-16T18:51:56,803 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed
2024-12-16T18:51:56,807 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling
2024-12-16T18:51:56,808 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:51:56,809 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375116809
2024-12-16T18:51:56,819 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375104817 with entries=4, filesize=947 B; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375116809
2024-12-16T18:51:56,820 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41133:41133),(127.0.0.1/127.0.0.1:34965:34965)]
2024-12-16T18:51:56,820 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375104817 is not closed yet, will try archiving it next time
2024-12-16T18:51:56,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741833_1009 (size=955)
2024-12-16T18:51:56,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741833_1009 (size=955)
2024-12-16T18:52:08,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37043 {}] regionserver.HRegion(8581): Flush requested on 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:52:08,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20372a9c124cb3132bd555bb6aecb7c7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:52:08,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/eddcd8c2fddb45a881fea20844398fc9 is 1080, key is row0001/info:/1734375116825/Put/seqid=0
2024-12-16T18:52:08,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741840_1016 (size=12509)
2024-12-16T18:52:08,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741840_1016 (size=12509)
2024-12-16T18:52:08,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/eddcd8c2fddb45a881fea20844398fc9
2024-12-16T18:52:08,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/eddcd8c2fddb45a881fea20844398fc9 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/eddcd8c2fddb45a881fea20844398fc9
2024-12-16T18:52:08,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/eddcd8c2fddb45a881fea20844398fc9, entries=7, sequenceid=11, filesize=12.2 K
2024-12-16T18:52:08,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20372a9c124cb3132bd555bb6aecb7c7 in 137ms, sequenceid=11, compaction requested=false
2024-12-16T18:52:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20372a9c124cb3132bd555bb6aecb7c7:
2024-12-16T18:52:11,182 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T18:52:16,876 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375136876
2024-12-16T18:52:17,088 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 209 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:17,089 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375116809 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375136876
2024-12-16T18:52:17,090 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41133:41133),(127.0.0.1/127.0.0.1:34965:34965)]
2024-12-16T18:52:17,090 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375116809 is not closed yet, will try archiving it next time
2024-12-16T18:52:17,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741839_1015 (size=12399)
2024-12-16T18:52:17,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741839_1015 (size=12399)
2024-12-16T18:52:17,294 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:19,322 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:52:19,324 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:52:19,499 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:21,702 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:23,906 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:23,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37043 {}] regionserver.HRegion(8581): Flush requested on 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:52:23,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20372a9c124cb3132bd555bb6aecb7c7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:52:24,110 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:24,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/db08c69aa0884c1e94458a866b04e276 is 1080, key is row0008/info:/1734375130862/Put/seqid=0
2024-12-16T18:52:24,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741842_1018 (size=12509)
2024-12-16T18:52:24,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741842_1018 (size=12509)
2024-12-16T18:52:24,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/db08c69aa0884c1e94458a866b04e276
2024-12-16T18:52:24,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/db08c69aa0884c1e94458a866b04e276 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/db08c69aa0884c1e94458a866b04e276
2024-12-16T18:52:24,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/db08c69aa0884c1e94458a866b04e276, entries=7, sequenceid=21, filesize=12.2 K
2024-12-16T18:52:24,351 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:24,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20372a9c124cb3132bd555bb6aecb7c7 in 445ms, sequenceid=21, compaction requested=false
2024-12-16T18:52:24,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20372a9c124cb3132bd555bb6aecb7c7:
2024-12-16T18:52:24,351 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=24.4 K, sizeToCheck=16.0 K
2024-12-16T18:52:24,351 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:52:24,352 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/eddcd8c2fddb45a881fea20844398fc9 because midkey is the same as first or last row
2024-12-16T18:52:26,110 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:26,537 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-16T18:52:26,537 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-16T18:52:28,314 WARN [sync.1 {}] wal.AbstractFSWAL(1346): Requesting log roll because we exceeded slow sync threshold; count=7, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:28,315 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:28,316 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C37043%2C1734375102890:(num 1734375136876) roll requested
2024-12-16T18:52:28,316 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375148316
2024-12-16T18:52:28,588 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 266 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:28,790 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:28,790 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375136876 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375148316
2024-12-16T18:52:28,790 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41133:41133),(127.0.0.1/127.0.0.1:34965:34965)]
2024-12-16T18:52:28,791 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375136876 is not closed yet, will try archiving it next time
2024-12-16T18:52:28,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741841_1017 (size=7739)
2024-12-16T18:52:28,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741841_1017 (size=7739)
2024-12-16T18:52:28,796 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375116809 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs/7a6e4bf70377%2C37043%2C1734375102890.1734375116809
2024-12-16T18:52:30,765 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 447 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:32,778 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 20372a9c124cb3132bd555bb6aecb7c7, had cached 0 bytes from a total of 25018
2024-12-16T18:52:32,969 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:35,171 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:37,382 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:39,384 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-12-16T18:52:39,385 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375159385
2024-12-16T18:52:41,183 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T18:52:44,411 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 5023 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:44,411 WARN [Time-limited test {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5023 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:44,411 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C37043%2C1734375102890:(num 1734375159385) roll requested
2024-12-16T18:52:45,574 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 737b252b3c17601ff08eca64ce3fd6e7 changed from -1.0 to 0.0, refreshing cache
2024-12-16T18:52:49,412 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:49,412 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:49,412 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375148316 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375159385
2024-12-16T18:52:49,412 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41133:41133),(127.0.0.1/127.0.0.1:34965:34965)]
2024-12-16T18:52:49,413 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375148316 is not closed yet, will try archiving it next time
2024-12-16T18:52:49,413 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375169413
2024-12-16T18:52:49,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741843_1019 (size=4753)
2024-12-16T18:52:49,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741843_1019 (size=4753)
2024-12-16T18:52:54,417 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:54,417 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:54,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37043 {}] regionserver.HRegion(8581): Flush requested on 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:52:54,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20372a9c124cb3132bd555bb6aecb7c7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:52:54,424 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:54,424 WARN [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:56,418 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-12-16T18:52:59,419 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:59,419 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:59,424 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:59,424 WARN [sync.0 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK], DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK]]
2024-12-16T18:52:59,425 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375159385 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375169413
2024-12-16T18:52:59,426 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:41133:41133)]
2024-12-16T18:52:59,426 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375159385 is not closed yet, will try archiving it next time
2024-12-16T18:52:59,426 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C37043%2C1734375102890:(num 1734375169413) roll requested
2024-12-16T18:52:59,426 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375179426
2024-12-16T18:52:59,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/7b8aaaf62b6643fc9db0479b80985d71 is 1080, key is row0015/info:/1734375145909/Put/seqid=0
2024-12-16T18:52:59,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741844_1020 (size=1569)
2024-12-16T18:52:59,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741844_1020 (size=1569)
2024-12-16T18:52:59,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741846_1022 (size=12509)
2024-12-16T18:52:59,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741846_1022 (size=12509)
2024-12-16T18:52:59,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/7b8aaaf62b6643fc9db0479b80985d71
2024-12-16T18:52:59,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/7b8aaaf62b6643fc9db0479b80985d71 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/7b8aaaf62b6643fc9db0479b80985d71
2024-12-16T18:52:59,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/7b8aaaf62b6643fc9db0479b80985d71, entries=7, sequenceid=31, filesize=12.2 K
2024-12-16T18:53:04,443 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5013 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:04,443 WARN [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:04,500 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:04,500 WARN [sync.1 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:04,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20372a9c124cb3132bd555bb6aecb7c7 in 10083ms, sequenceid=31, compaction requested=true
2024-12-16T18:53:04,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20372a9c124cb3132bd555bb6aecb7c7:
2024-12-16T18:53:04,501 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=36.6 K, sizeToCheck=16.0 K
2024-12-16T18:53:04,501 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:53:04,501 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/eddcd8c2fddb45a881fea20844398fc9 because midkey is the same as first or last row
2024-12-16T18:53:04,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20372a9c124cb3132bd555bb6aecb7c7:info, priority=-2147483648, current under compaction store size is 1
2024-12-16T18:53:04,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:53:04,502 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:53:04,505 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:53:04,507 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.HStore(1540): 20372a9c124cb3132bd555bb6aecb7c7/info is initiating minor compaction (all files)
2024-12-16T18:53:04,507 INFO [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20372a9c124cb3132bd555bb6aecb7c7/info in TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:53:04,507 INFO [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/eddcd8c2fddb45a881fea20844398fc9, hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/db08c69aa0884c1e94458a866b04e276, hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/7b8aaaf62b6643fc9db0479b80985d71] into tmpdir=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp, totalSize=36.6 K
2024-12-16T18:53:04,509 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] compactions.Compactor(224): Compacting eddcd8c2fddb45a881fea20844398fc9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1734375116825
2024-12-16T18:53:04,509 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] compactions.Compactor(224): Compacting db08c69aa0884c1e94458a866b04e276, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1734375130862
2024-12-16T18:53:04,510 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b8aaaf62b6643fc9db0479b80985d71, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1734375145909
2024-12-16T18:53:04,539 INFO [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20372a9c124cb3132bd555bb6aecb7c7#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:53:04,540 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/e0aed6d8f2214e5cb61ece53fa18a669 is 1080, key is row0001/info:/1734375116825/Put/seqid=0
2024-12-16T18:53:04,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741848_1024 (size=27710)
2024-12-16T18:53:04,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741848_1024 (size=27710)
2024-12-16T18:53:04,559 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/e0aed6d8f2214e5cb61ece53fa18a669 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/e0aed6d8f2214e5cb61ece53fa18a669
2024-12-16T18:53:09,444 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:09,444 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:09,445 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375169413 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375179426
2024-12-16T18:53:09,445 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:41133:41133)]
2024-12-16T18:53:09,445 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375169413 is not closed yet, will try archiving it next time
2024-12-16T18:53:09,445 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375136876 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs/7a6e4bf70377%2C37043%2C1734375102890.1734375136876
2024-12-16T18:53:09,446 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375189445
2024-12-16T18:53:09,448 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C37043%2C1734375102890:(num 1734375189445) roll requested
2024-12-16T18:53:09,448 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375148316 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs/7a6e4bf70377%2C37043%2C1734375102890.1734375148316
2024-12-16T18:53:09,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741845_1021 (size=438)
2024-12-16T18:53:09,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741845_1021 (size=438)
2024-12-16T18:53:09,451 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375159385 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs/7a6e4bf70377%2C37043%2C1734375102890.1734375159385
2024-12-16T18:53:09,456 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375169413 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs/7a6e4bf70377%2C37043%2C1734375102890.1734375169413
2024-12-16T18:53:11,183 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T18:53:14,446 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:14,446 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:14,448 INFO [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20372a9c124cb3132bd555bb6aecb7c7/info of 20372a9c124cb3132bd555bb6aecb7c7 into e0aed6d8f2214e5cb61ece53fa18a669(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 9sec to execute.
2024-12-16T18:53:14,448 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20372a9c124cb3132bd555bb6aecb7c7:
2024-12-16T18:53:14,448 INFO [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7., storeName=20372a9c124cb3132bd555bb6aecb7c7/info, priority=13, startTime=1734375184502; duration=9sec
2024-12-16T18:53:14,449 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=27.1 K, sizeToCheck=16.0 K
2024-12-16T18:53:14,449 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:53:14,449 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/e0aed6d8f2214e5cb61ece53fa18a669 because midkey is the same as first or last row
2024-12-16T18:53:14,449 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:53:14,449 DEBUG [RS:0;7a6e4bf70377:37043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20372a9c124cb3132bd555bb6aecb7c7:info
2024-12-16T18:53:14,478 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:14,478 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37003,DS-43ee75b6-3091-4436-95e8-a3fc73c4e385,DISK], DatanodeInfoWithStorage[127.0.0.1:36489,DS-33a6bd99-4f35-4aa7-b1c0-e484f7c1f09b,DISK]]
2024-12-16T18:53:14,479 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375179426 with entries=1, filesize=531 B; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375189445
2024-12-16T18:53:14,479 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:41133:41133)]
2024-12-16T18:53:14,479 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375179426 is not closed yet, will try archiving it next time
2024-12-16T18:53:14,479 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375194479
2024-12-16T18:53:14,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741847_1023 (size=539)
2024-12-16T18:53:14,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741847_1023 (size=539)
2024-12-16T18:53:14,482 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375179426 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs/7a6e4bf70377%2C37043%2C1734375102890.1734375179426
2024-12-16T18:53:14,490 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375189445 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375194479
2024-12-16T18:53:14,490 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34965:34965),(127.0.0.1/127.0.0.1:41133:41133)]
2024-12-16T18:53:14,490 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375189445 is not closed yet, will try archiving it next time
2024-12-16T18:53:14,490 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C37043%2C1734375102890:(num 1734375194479) roll requested
2024-12-16T18:53:14,491 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C37043%2C1734375102890.1734375194491
2024-12-16T18:53:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741849_1025 (size=1258)
2024-12-16T18:53:14,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741849_1025 (size=1258)
2024-12-16T18:53:14,499 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375194479 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375194491
2024-12-16T18:53:14,499 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41133:41133),(127.0.0.1/127.0.0.1:34965:34965)]
2024-12-16T18:53:14,499 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375194479 is not closed yet, will try archiving it next time
2024-12-16T18:53:14,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741850_1026 (size=93)
2024-12-16T18:53:14,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741850_1026 (size=93)
2024-12-16T18:53:14,502 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890/7a6e4bf70377%2C37043%2C1734375102890.1734375194479 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs/7a6e4bf70377%2C37043%2C1734375102890.1734375194479
2024-12-16T18:53:17,778 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 20372a9c124cb3132bd555bb6aecb7c7, had cached 0 bytes from a total of 27710
2024-12-16T18:53:26,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37043 {}] regionserver.HRegion(8581): Flush requested on 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:53:26,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20372a9c124cb3132bd555bb6aecb7c7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:53:26,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/94789f8beedd4b5daecda84912035b25 is 1080, key is row0022/info:/1734375194480/Put/seqid=0
2024-12-16T18:53:26,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741852_1028 (size=12509)
2024-12-16T18:53:26,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741852_1028 (size=12509)
2024-12-16T18:53:26,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/94789f8beedd4b5daecda84912035b25
2024-12-16T18:53:26,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/94789f8beedd4b5daecda84912035b25 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/94789f8beedd4b5daecda84912035b25
2024-12-16T18:53:26,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/94789f8beedd4b5daecda84912035b25, entries=7, sequenceid=42, filesize=12.2 K
2024-12-16T18:53:26,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20372a9c124cb3132bd555bb6aecb7c7 in 36ms, sequenceid=42, compaction requested=false
2024-12-16T18:53:26,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20372a9c124cb3132bd555bb6aecb7c7:
2024-12-16T18:53:26,540 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=39.3 K, sizeToCheck=16.0 K
2024-12-16T18:53:26,540 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:53:26,540 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/e0aed6d8f2214e5cb61ece53fa18a669 because midkey is the same as first or last row
2024-12-16T18:53:34,516 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-16T18:53:34,517 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-16T18:53:34,517 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1bfed419 to 127.0.0.1:61737
2024-12-16T18:53:34,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:53:34,518 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-16T18:53:34,519 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=791946289, stopped=false
2024-12-16T18:53:34,519 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7a6e4bf70377,35675,1734375102108
2024-12-16T18:53:34,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:53:34,530 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-16T18:53:34,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:53:34,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:34,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:34,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:53:34,530 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,37043,1734375102890' *****
2024-12-16T18:53:34,530 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-16T18:53:34,530 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-16T18:53:34,531 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:53:34,531 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-16T18:53:34,531 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:53:34,531 INFO [RS:0;7a6e4bf70377:37043 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-16T18:53:34,531 INFO [RS:0;7a6e4bf70377:37043 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-16T18:53:34,531 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(3579): Received CLOSE for 737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:53:34,531 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(3579): Received CLOSE for 20372a9c124cb3132bd555bb6aecb7c7
2024-12-16T18:53:34,532 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,37043,1734375102890
2024-12-16T18:53:34,532 DEBUG [RS:0;7a6e4bf70377:37043 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:53:34,532 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-16T18:53:34,532 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-16T18:53:34,532 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-16T18:53:34,532 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-16T18:53:34,532 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 737b252b3c17601ff08eca64ce3fd6e7, disabling compactions & flushes
2024-12-16T18:53:34,532 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:53:34,532 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:53:34,532 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-16T18:53:34,532 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7. after waiting 0 ms
2024-12-16T18:53:34,532 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:53:34,532 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1603): Online Regions={737b252b3c17601ff08eca64ce3fd6e7=hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7., 1588230740=hbase:meta,,1.1588230740, 20372a9c124cb3132bd555bb6aecb7c7=TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.}
2024-12-16T18:53:34,532 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 737b252b3c17601ff08eca64ce3fd6e7 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-16T18:53:34,532 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:53:34,532 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:53:34,532 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:53:34,532 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:53:34,532 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:53:34,533 DEBUG [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 20372a9c124cb3132bd555bb6aecb7c7, 737b252b3c17601ff08eca64ce3fd6e7
2024-12-16T18:53:34,533 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.81 KB heapSize=5.32 KB
2024-12-16T18:53:34,550 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7/.tmp/info/b5abe58f183b4bccb3b23d15d7b61b8a is 45, key is default/info:d/1734375106313/Put/seqid=0
2024-12-16T18:53:34,552 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/.tmp/info/51a14b6ef0ad4e86982e8e524b056573 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7./info:regioninfo/1734375107818/Put/seqid=0
2024-12-16T18:53:34,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741853_1029 (size=5037)
2024-12-16T18:53:34,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741853_1029 (size=5037)
2024-12-16T18:53:34,558 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7/.tmp/info/b5abe58f183b4bccb3b23d15d7b61b8a
2024-12-16T18:53:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741854_1030 (size=8172)
2024-12-16T18:53:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741854_1030 (size=8172)
2024-12-16T18:53:34,560 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.59 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/.tmp/info/51a14b6ef0ad4e86982e8e524b056573
2024-12-16T18:53:34,568 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7/.tmp/info/b5abe58f183b4bccb3b23d15d7b61b8a as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7/info/b5abe58f183b4bccb3b23d15d7b61b8a
2024-12-16T18:53:34,576 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7/info/b5abe58f183b4bccb3b23d15d7b61b8a, entries=2, sequenceid=6, filesize=4.9 K
2024-12-16T18:53:34,577 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 737b252b3c17601ff08eca64ce3fd6e7 in 45ms, sequenceid=6, compaction requested=false
2024-12-16T18:53:34,583 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/namespace/737b252b3c17601ff08eca64ce3fd6e7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-16T18:53:34,585 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/.tmp/table/f9d9a889bcfa42c0b33d1203b1bfc82b is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1734375107841/Put/seqid=0
2024-12-16T18:53:34,586 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:53:34,586 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 737b252b3c17601ff08eca64ce3fd6e7:
2024-12-16T18:53:34,586 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734375105584.737b252b3c17601ff08eca64ce3fd6e7.
2024-12-16T18:53:34,586 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 20372a9c124cb3132bd555bb6aecb7c7, disabling compactions & flushes
2024-12-16T18:53:34,586 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:53:34,586 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:53:34,586 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7. after waiting 0 ms
2024-12-16T18:53:34,586 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:53:34,586 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 20372a9c124cb3132bd555bb6aecb7c7 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB
2024-12-16T18:53:34,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741855_1031 (size=5452)
2024-12-16T18:53:34,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741855_1031 (size=5452)
2024-12-16T18:53:34,591 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=232 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/.tmp/table/f9d9a889bcfa42c0b33d1203b1bfc82b
2024-12-16T18:53:34,592 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/769be6fe30374610b64aaa4abfa5d147 is 1080, key is row0029/info:/1734375208507/Put/seqid=0
2024-12-16T18:53:34,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741856_1032 (size=8193)
2024-12-16T18:53:34,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741856_1032 (size=8193)
2024-12-16T18:53:34,598 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/769be6fe30374610b64aaa4abfa5d147
2024-12-16T18:53:34,599 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/.tmp/info/51a14b6ef0ad4e86982e8e524b056573 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/info/51a14b6ef0ad4e86982e8e524b056573
2024-12-16T18:53:34,607 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/.tmp/info/769be6fe30374610b64aaa4abfa5d147 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/769be6fe30374610b64aaa4abfa5d147
2024-12-16T18:53:34,607 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/info/51a14b6ef0ad4e86982e8e524b056573, entries=20, sequenceid=14, filesize=8.0 K
2024-12-16T18:53:34,609 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/.tmp/table/f9d9a889bcfa42c0b33d1203b1bfc82b as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/table/f9d9a889bcfa42c0b33d1203b1bfc82b
2024-12-16T18:53:34,614 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/769be6fe30374610b64aaa4abfa5d147, entries=3, sequenceid=48, filesize=8.0 K
2024-12-16T18:53:34,616 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 20372a9c124cb3132bd555bb6aecb7c7 in 29ms, sequenceid=48, compaction requested=true
2024-12-16T18:53:34,616 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/table/f9d9a889bcfa42c0b33d1203b1bfc82b, entries=4, sequenceid=14, filesize=5.3 K
2024-12-16T18:53:34,616 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/eddcd8c2fddb45a881fea20844398fc9, hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/db08c69aa0884c1e94458a866b04e276, hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/7b8aaaf62b6643fc9db0479b80985d71] to archive
2024-12-16T18:53:34,617 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.81 KB/2882, heapSize ~5.04 KB/5160, currentSize=0 B/0 for 1588230740 in 85ms, sequenceid=14, compaction requested=false
2024-12-16T18:53:34,620 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.-1 {}] backup.HFileArchiver(363): Archiving compacted files.
2024-12-16T18:53:34,623 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1
2024-12-16T18:53:34,624 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/eddcd8c2fddb45a881fea20844398fc9 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/eddcd8c2fddb45a881fea20844398fc9
2024-12-16T18:53:34,624 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/7b8aaaf62b6643fc9db0479b80985d71 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/7b8aaaf62b6643fc9db0479b80985d71
2024-12-16T18:53:34,624 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-16T18:53:34,624 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/db08c69aa0884c1e94458a866b04e276 to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/info/db08c69aa0884c1e94458a866b04e276
2024-12-16T18:53:34,624 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:53:34,625 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:53:34,625 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-16T18:53:34,631 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:53:34,640 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/data/default/TestLogRolling-testSlowSyncLogRolling/20372a9c124cb3132bd555bb6aecb7c7/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1
2024-12-16T18:53:34,641 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:53:34,641 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 20372a9c124cb3132bd555bb6aecb7c7:
2024-12-16T18:53:34,641 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1734375106764.20372a9c124cb3132bd555bb6aecb7c7.
2024-12-16T18:53:34,690 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-16T18:53:34,690 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-16T18:53:34,733 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,37043,1734375102890; all regions closed.
2024-12-16T18:53:34,734 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890
2024-12-16T18:53:34,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741834_1010 (size=4330)
2024-12-16T18:53:34,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741834_1010 (size=4330)
2024-12-16T18:53:34,740 DEBUG [RS:0;7a6e4bf70377:37043 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs
2024-12-16T18:53:34,740 INFO [RS:0;7a6e4bf70377:37043 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C37043%2C1734375102890.meta:.meta(num 1734375105263)
2024-12-16T18:53:34,740 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/WALs/7a6e4bf70377,37043,1734375102890
2024-12-16T18:53:34,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741851_1027 (size=13066)
2024-12-16T18:53:34,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741851_1027 (size=13066)
2024-12-16T18:53:34,748 DEBUG [RS:0;7a6e4bf70377:37043 {}] wal.AbstractFSWAL(1071): Moved 3 WAL file(s) to /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/oldWALs
2024-12-16T18:53:34,748 INFO [RS:0;7a6e4bf70377:37043 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C37043%2C1734375102890:(num 1734375194491)
2024-12-16T18:53:34,748 DEBUG [RS:0;7a6e4bf70377:37043 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:53:34,748 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:53:34,749 INFO [RS:0;7a6e4bf70377:37043 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-16T18:53:34,749 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:53:34,749 INFO [RS:0;7a6e4bf70377:37043 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37043
2024-12-16T18:53:34,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,37043,1734375102890
2024-12-16T18:53:34,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:53:34,772 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,37043,1734375102890]
2024-12-16T18:53:34,772 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,37043,1734375102890; numProcessing=1
2024-12-16T18:53:34,780 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,37043,1734375102890 already deleted, retry=false
2024-12-16T18:53:34,780 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,37043,1734375102890 expired; onlineServers=0
2024-12-16T18:53:34,780 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,35675,1734375102108' *****
2024-12-16T18:53:34,780 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-16T18:53:34,780 DEBUG [M:0;7a6e4bf70377:35675 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2788a8c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:53:34,780 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,35675,1734375102108
2024-12-16T18:53:34,780 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,35675,1734375102108; all regions closed.
2024-12-16T18:53:34,780 DEBUG [M:0;7a6e4bf70377:35675 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:53:34,780 DEBUG [M:0;7a6e4bf70377:35675 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-16T18:53:34,781 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-16T18:53:34,781 DEBUG [M:0;7a6e4bf70377:35675 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-16T18:53:34,781 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375104431 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375104431,5,FailOnTimeoutGroup]
2024-12-16T18:53:34,781 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375104431 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375104431,5,FailOnTimeoutGroup]
2024-12-16T18:53:34,781 INFO [M:0;7a6e4bf70377:35675 {}] hbase.ChoreService(370): Chore service for: master/7a6e4bf70377:0 had [] on shutdown
2024-12-16T18:53:34,781 DEBUG [M:0;7a6e4bf70377:35675 {}] master.HMaster(1733): Stopping service threads
2024-12-16T18:53:34,781 INFO [M:0;7a6e4bf70377:35675 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-16T18:53:34,782 INFO [M:0;7a6e4bf70377:35675 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-16T18:53:34,782 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-16T18:53:34,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-16T18:53:34,788 DEBUG [M:0;7a6e4bf70377:35675 {}] zookeeper.ZKUtil(347): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-16T18:53:34,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:34,788 WARN [M:0;7a6e4bf70377:35675 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-16T18:53:34,788 INFO [M:0;7a6e4bf70377:35675 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-16T18:53:34,788 INFO [M:0;7a6e4bf70377:35675 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-16T18:53:34,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:53:34,789 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:53:34,789 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:53:34,789 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:53:34,789 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:53:34,789 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:53:34,789 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.21 KB heapSize=50.14 KB
2024-12-16T18:53:34,814 DEBUG [M:0;7a6e4bf70377:35675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4546b4f5570e4972ae7c184adf476dbe is 82, key is hbase:meta,,1/info:regioninfo/1734375105420/Put/seqid=0
2024-12-16T18:53:34,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741857_1033 (size=5672)
2024-12-16T18:53:34,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741857_1033 (size=5672)
2024-12-16T18:53:34,823 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4546b4f5570e4972ae7c184adf476dbe
2024-12-16T18:53:34,852 DEBUG [M:0;7a6e4bf70377:35675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b9481af902b44fd08ebb8d3e46087c92 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734375107931/Put/seqid=0
2024-12-16T18:53:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741858_1034 (size=6426)
2024-12-16T18:53:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741858_1034 (size=6426)
2024-12-16T18:53:34,860 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.61 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b9481af902b44fd08ebb8d3e46087c92
2024-12-16T18:53:34,867 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b9481af902b44fd08ebb8d3e46087c92
2024-12-16T18:53:34,872 INFO [RS:0;7a6e4bf70377:37043 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,37043,1734375102890; zookeeper connection closed.
2024-12-16T18:53:34,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:53:34,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37043-0x100e451d1ff0001, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:53:34,872 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@fb8b0cc {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@fb8b0cc
2024-12-16T18:53:34,872 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-16T18:53:34,883 DEBUG [M:0;7a6e4bf70377:35675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e7408661ea64e4a893bdca15b8ab313 is 69, key is 7a6e4bf70377,37043,1734375102890/rs:state/1734375104504/Put/seqid=0
2024-12-16T18:53:34,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741859_1035 (size=5156)
2024-12-16T18:53:34,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741859_1035 (size=5156)
2024-12-16T18:53:34,890 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e7408661ea64e4a893bdca15b8ab313
2024-12-16T18:53:34,919 DEBUG [M:0;7a6e4bf70377:35675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/04171603b4e44ed0a93845e1fbd10188 is 52, key is load_balancer_on/state:d/1734375106684/Put/seqid=0
2024-12-16T18:53:34,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741860_1036 (size=5056)
2024-12-16T18:53:34,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741860_1036 (size=5056)
2024-12-16T18:53:34,926 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/04171603b4e44ed0a93845e1fbd10188
2024-12-16T18:53:34,933 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4546b4f5570e4972ae7c184adf476dbe as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4546b4f5570e4972ae7c184adf476dbe
2024-12-16T18:53:34,940 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4546b4f5570e4972ae7c184adf476dbe, entries=8, sequenceid=104, filesize=5.5 K
2024-12-16T18:53:34,942 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b9481af902b44fd08ebb8d3e46087c92 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b9481af902b44fd08ebb8d3e46087c92
2024-12-16T18:53:34,949 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b9481af902b44fd08ebb8d3e46087c92
2024-12-16T18:53:34,949 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b9481af902b44fd08ebb8d3e46087c92, entries=11, sequenceid=104, filesize=6.3 K
2024-12-16T18:53:34,951 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e7408661ea64e4a893bdca15b8ab313 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e7408661ea64e4a893bdca15b8ab313
2024-12-16T18:53:34,958 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e7408661ea64e4a893bdca15b8ab313, entries=1, sequenceid=104, filesize=5.0 K
2024-12-16T18:53:34,959 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/04171603b4e44ed0a93845e1fbd10188 as hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/04171603b4e44ed0a93845e1fbd10188
2024-12-16T18:53:34,966 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/04171603b4e44ed0a93845e1fbd10188, entries=1, sequenceid=104, filesize=4.9 K
2024-12-16T18:53:34,968 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.21 KB/41173, heapSize ~50.08 KB/51280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 179ms, sequenceid=104, compaction requested=false
2024-12-16T18:53:34,973 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:53:34,973 DEBUG [M:0;7a6e4bf70377:35675 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:53:34,974 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/MasterData/WALs/7a6e4bf70377,35675,1734375102108
2024-12-16T18:53:34,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37003 is added to blk_1073741830_1006 (size=48474)
2024-12-16T18:53:34,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36489 is added to blk_1073741830_1006 (size=48474)
2024-12-16T18:53:35,378 INFO [M:0;7a6e4bf70377:35675 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-16T18:53:35,378 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:53:35,378 INFO [M:0;7a6e4bf70377:35675 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35675
2024-12-16T18:53:35,438 DEBUG [M:0;7a6e4bf70377:35675 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7a6e4bf70377,35675,1734375102108 already deleted, retry=false
2024-12-16T18:53:35,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:53:35,547 INFO [M:0;7a6e4bf70377:35675 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,35675,1734375102108; zookeeper connection closed.
2024-12-16T18:53:35,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35675-0x100e451d1ff0000, quorum=127.0.0.1:61737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:53:35,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4e5af57{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:53:35,560 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68aaa55{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:53:35,561 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:53:35,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f9d2cc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:53:35,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22a0043c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.log.dir/,STOPPED}
2024-12-16T18:53:35,564 WARN [BP-1771638224-172.17.0.2-1734375097902 heartbeating to localhost/127.0.0.1:33663 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:53:35,564 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:53:35,564 WARN [BP-1771638224-172.17.0.2-1734375097902 heartbeating to localhost/127.0.0.1:33663 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1771638224-172.17.0.2-1734375097902 (Datanode Uuid 1de1932b-ec13-49a0-8d1e-a5962bf3c8e3) service to localhost/127.0.0.1:33663
2024-12-16T18:53:35,564 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:53:35,565 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/dfs/data/data3/current/BP-1771638224-172.17.0.2-1734375097902 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:53:35,566 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/dfs/data/data4/current/BP-1771638224-172.17.0.2-1734375097902 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:53:35,566 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:53:35,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64b46cf2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:53:35,569 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1872bb43{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:53:35,569 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:53:35,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44404b82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:53:35,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e40bccb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.log.dir/,STOPPED}
2024-12-16T18:53:35,570 WARN [BP-1771638224-172.17.0.2-1734375097902 heartbeating to localhost/127.0.0.1:33663 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:53:35,570 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:53:35,570 WARN [BP-1771638224-172.17.0.2-1734375097902 heartbeating to localhost/127.0.0.1:33663 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1771638224-172.17.0.2-1734375097902 (Datanode Uuid 809d6397-4d0c-4e10-bb08-068043b319c4) service to localhost/127.0.0.1:33663
2024-12-16T18:53:35,570 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:53:35,571 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/dfs/data/data1/current/BP-1771638224-172.17.0.2-1734375097902 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:53:35,571 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/cluster_d6671651-c3d0-f9c5-46db-9d19478ec800/dfs/data/data2/current/BP-1771638224-172.17.0.2-1734375097902 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:53:35,572 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:53:35,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@805400f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T18:53:35,586 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6567b82c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:53:35,586 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:53:35,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fabcd45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:53:35,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17bd5227{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.log.dir/,STOPPED}
2024-12-16T18:53:35,594 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-16T18:53:35,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-16T18:53:35,638 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=64 (was 12)
Potentially hanging thread: ForkJoinPool-2-worker-2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: SessionTracker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Potentially hanging thread: HBase-Metrics2-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-1-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: region-location-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33663
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-3-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: region-location-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33663
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: regionserver/7a6e4bf70377:0.procedureResultReporter
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Potentially hanging thread: nioEventLoopGroup-2-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-3-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Timer for 'HBase' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: SnapshotHandlerChoreCleaner
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33663
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: region-location-2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-2-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33663
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-2-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@6a3564d2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:33663 from jenkins.hfs.0
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:33663 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HFileArchiver-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HFileArchiver-3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-1-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-3-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: nioEventLoopGroup-4-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:33663
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:33663 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
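The report above is HBase's ResourceChecker output: it snapshots resource counts (live threads, open file descriptors, system load) before a test and diffs them afterwards, dumping the stack of every thread that appeared during the test and is still alive. A minimal sketch of that before/after thread accounting using only the JDK follows; the class and method names are illustrative, not ResourceChecker's actual API.

import java.util.HashSet;
import java.util.Set;

public final class ThreadLeakSniffer {
    private Set<String> before;

    // Call before the test runs.
    public void snapshotBefore() {
        before = liveThreadNames();
    }

    // Call after the test; prints threads that appeared during the test
    // and are still alive when it finished.
    public void reportAfter() {
        Set<String> leaked = liveThreadNames();
        leaked.removeAll(before);
        for (String name : leaked) {
            System.out.println("Potentially hanging thread: " + name);
        }
    }

    private static Set<String> liveThreadNames() {
        Set<String> names = new HashSet<>();
        // Thread.getAllStackTraces() enumerates every live thread in the JVM.
        for (Thread t : Thread.getAllStackTraces().keySet()) {
            if (t.isAlive()) {
                names.add(t.getName());
            }
        }
        return names;
    }
}

Most of the threads listed are idle pool workers (parked executors, Netty event loops blocked in epoll/select, timer wheels), which is why the checker says "Potentially": without knowing who owns each pool, a parked worker is indistinguishable from a genuinely leaked one.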
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=65, OpenFileDescriptor=406, MaxFileDescriptor=1048576, SystemLoadAverage=480, ProcessCount=11, AvailableMemoryMB=4295
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.log.dir so I do NOT create it in target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5feff81f-9955-0afc-7a77-e0b9f4c287af/hadoop.tmp.dir so I do NOT create it in target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361, deleteOnExit=true
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/test.cache.data in system properties and HBase conf
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.tmp.dir in system properties and HBase conf
2024-12-16T18:53:35,646 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
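The StartMiniClusterOption printed above (one master, one region server, two datanodes, one ZK server) is built by the test before calling startMiniCluster. A hedged sketch of that setup with HBaseTestingUtility; the builder method names follow the toString in the log and may differ slightly across HBase 2.x releases.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSetupSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option);   // brings up HDFS, ZooKeeper and HBase in-process
        try {
            // test body goes here
        } finally {
            util.shutdownMiniCluster();  // skipping this causes the thread/FD leaks flagged above
        }
    }
}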
2024-12-16T18:53:35,647 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:53:35,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:53:35,648 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-16T18:53:35,648 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/nfs.dump.dir in system properties and HBase conf
2024-12-16T18:53:35,648 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/java.io.tmpdir in system properties and HBase conf
2024-12-16T18:53:35,648 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:53:35,648 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-16T18:53:35,648 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-16T18:53:35,659 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-16T18:53:35,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:53:35,946 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:53:35,947 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:53:35,947 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:53:35,947 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:53:35,948 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:53:35,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@309e2ef7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:53:35,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2be1c732{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:53:36,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24064364{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/java.io.tmpdir/jetty-localhost-46825-hadoop-hdfs-3_4_1-tests_jar-_-any-2673558320200178635/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T18:53:36,045 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72d4a98{HTTP/1.1, (http/1.1)}{localhost:46825}
2024-12-16T18:53:36,046 INFO [Time-limited test {}] server.Server(415): Started @120412ms
2024-12-16T18:53:36,061 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-16T18:53:36,251 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:53:36,255 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:53:36,256 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:53:36,256 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:53:36,256 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T18:53:36,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@767293e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:53:36,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@410ad4c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:53:36,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aa16c5a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/java.io.tmpdir/jetty-localhost-46115-hadoop-hdfs-3_4_1-tests_jar-_-any-2807376603351913134/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:53:36,363 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ae2b76d{HTTP/1.1, (http/1.1)}{localhost:46115}
2024-12-16T18:53:36,363 INFO [Time-limited test {}] server.Server(415): Started @120729ms
2024-12-16T18:53:36,364 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:53:36,435 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:53:36,439 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:53:36,455 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:53:36,455 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:53:36,456 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:53:36,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7920de13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:53:36,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4496df05{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:53:36,580 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41690fa2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/java.io.tmpdir/jetty-localhost-34285-hadoop-hdfs-3_4_1-tests_jar-_-any-12534999437793518648/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:53:36,581 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e758761{HTTP/1.1, (http/1.1)}{localhost:34285}
2024-12-16T18:53:36,581 INFO [Time-limited test {}] server.Server(415): Started @120947ms
2024-12-16T18:53:36,583 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:53:37,247 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data2/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:37,247 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data1/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:37,279 WARN [Thread-428 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:53:37,282 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe782c2252b1e19eb with lease ID 0x6104860f8492c41e: Processing first storage report for DS-0f66e070-8957-4826-9a9d-a92f0301cce9 from datanode DatanodeRegistration(127.0.0.1:44267, datanodeUuid=5a41e41c-25c9-4eca-a408-66279dbfb687, infoPort=36383, infoSecurePort=0, ipcPort=40389, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:37,282 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe782c2252b1e19eb with lease ID 0x6104860f8492c41e: from storage DS-0f66e070-8957-4826-9a9d-a92f0301cce9 node DatanodeRegistration(127.0.0.1:44267, datanodeUuid=5a41e41c-25c9-4eca-a408-66279dbfb687, infoPort=36383, infoSecurePort=0, ipcPort=40389, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:37,283 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe782c2252b1e19eb with lease ID 0x6104860f8492c41e: Processing first storage report for DS-2c15698c-97ad-445a-935e-422fd613a6da from datanode DatanodeRegistration(127.0.0.1:44267, datanodeUuid=5a41e41c-25c9-4eca-a408-66279dbfb687, infoPort=36383, infoSecurePort=0, ipcPort=40389, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:37,283 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe782c2252b1e19eb with lease ID 0x6104860f8492c41e: from storage DS-2c15698c-97ad-445a-935e-422fd613a6da node DatanodeRegistration(127.0.0.1:44267, datanodeUuid=5a41e41c-25c9-4eca-a408-66279dbfb687, infoPort=36383, infoSecurePort=0, ipcPort=40389, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:37,510 WARN [Thread-476 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data4/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:37,510 WARN [Thread-475 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data3/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:37,530 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:53:37,533 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5cb6b83dfd8e88c0 with lease ID 0x6104860f8492c41f: Processing first storage report for DS-8ca290d4-c640-4beb-aff9-862f88137c9c from datanode DatanodeRegistration(127.0.0.1:46877, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=38779, infoSecurePort=0, ipcPort=41275, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:37,533 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5cb6b83dfd8e88c0 with lease ID 0x6104860f8492c41f: from storage DS-8ca290d4-c640-4beb-aff9-862f88137c9c node DatanodeRegistration(127.0.0.1:46877, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=38779, infoSecurePort=0, ipcPort=41275, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:37,533 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5cb6b83dfd8e88c0 with lease ID 0x6104860f8492c41f: Processing first storage report for DS-340dfb35-c2df-4844-b569-7fb2eae00d76 from datanode DatanodeRegistration(127.0.0.1:46877, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=38779, infoSecurePort=0, ipcPort=41275, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:37,533 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5cb6b83dfd8e88c0 with lease ID 0x6104860f8492c41f: from storage DS-340dfb35-c2df-4844-b569-7fb2eae00d76 node DatanodeRegistration(127.0.0.1:46877, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=38779, infoSecurePort=0, ipcPort=41275, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:37,538 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f
2024-12-16T18:53:37,548 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/zookeeper_0, clientPort=53238, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-16T18:53:37,554 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=53238
2024-12-16T18:53:37,555 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
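At this point both halves of the storage layer are up: a two-datanode HDFS minicluster (the block reports above come from its namenode) and a single-node MiniZooKeeperCluster on client port 53238. HBaseTestingUtility drives MiniDFSCluster from the hadoop-hdfs test jar internally; a standalone sketch of just the DFS piece follows, under the assumption that the same builder API from the Hadoop test artifacts is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two datanodes, matching numDataNodes=2 above; their storage lands
        // under the configured base dir as dfs/data/data1 ... data4, which is
        // where the BlockPoolSlice warnings earlier in this log point.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        FileSystem fs = cluster.getFileSystem();   // hdfs://localhost:<random port>
        System.out.println("NameNode at " + fs.getUri());
        cluster.shutdown();
    }
}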
2024-12-16T18:53:37,557 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:37,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:53:37,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:53:37,982 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20 with version=8
2024-12-16T18:53:37,982 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/hbase-staging
2024-12-16T18:53:37,984 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:53:37,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:37,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:37,985 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:53:37,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:37,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:53:37,985 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:53:37,985 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:53:37,986 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36083
2024-12-16T18:53:37,987 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:37,989 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:37,992 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36083 connecting to ZooKeeper ensemble=127.0.0.1:53238
2024-12-16T18:53:38,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360830x0, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:53:38,072 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36083-0x100e45398430000 connected
2024-12-16T18:53:38,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:53:38,181 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:53:38,182 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:53:38,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36083
2024-12-16T18:53:38,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36083
2024-12-16T18:53:38,190 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36083
2024-12-16T18:53:38,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36083
2024-12-16T18:53:38,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36083
2024-12-16T18:53:38,194 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20, hbase.cluster.distributed=false
2024-12-16T18:53:38,214 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:53:38,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:38,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:38,215 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:53:38,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:38,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:53:38,215 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:53:38,216 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
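The ZKUtil lines above set watches on znodes that do not exist yet (/hbase/master, /hbase/running, /hbase/acl). With the plain ZooKeeper client the same pattern is an exists() call with a watcher: it registers the watch even when the node is absent and delivers NodeCreated once someone creates it. A minimal sketch against the ensemble address from the log; session and error handling are elided.

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchBeforeCreateSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53238", 30000, event -> {});
        // exists() on a missing znode returns null but still leaves a watch
        // behind; the lambda fires when /hbase/master is later created.
        zk.exists("/hbase/master", event -> {
            if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                System.out.println("active master registered at " + event.getPath());
            }
        });
        Thread.sleep(Long.MAX_VALUE);   // keep the session alive for the demo
    }
}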
2024-12-16T18:53:38,219 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45905
2024-12-16T18:53:38,220 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T18:53:38,233 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-16T18:53:38,235 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:38,238 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:38,245 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45905 connecting to ZooKeeper ensemble=127.0.0.1:53238
2024-12-16T18:53:38,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459050x0, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:53:38,269 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459050x0, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:53:38,270 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459050x0, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:53:38,271 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459050x0, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:53:38,274 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45905-0x100e45398430001 connected
2024-12-16T18:53:38,289 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45905
2024-12-16T18:53:38,290 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45905
2024-12-16T18:53:38,293 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45905
2024-12-16T18:53:38,294 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45905
2024-12-16T18:53:38,294 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45905
2024-12-16T18:53:38,298 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7a6e4bf70377,36083,1734375217984
2024-12-16T18:53:38,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:53:38,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:53:38,317 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a6e4bf70377,36083,1734375217984
2024-12-16T18:53:38,321 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a6e4bf70377:36083
2024-12-16T18:53:38,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:53:38,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,329 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:53:38,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:53:38,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,336 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a6e4bf70377,36083,1734375217984 from backup master directory
2024-12-16T18:53:38,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:53:38,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:53:38,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a6e4bf70377,36083,1734375217984
2024-12-16T18:53:38,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:53:38,347 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
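The sequence above (register under /hbase/backup-masters, watch /hbase/master, delete the backup entry once promoted) is HBase's take on the standard ZooKeeper active/standby election: whoever creates the ephemeral /hbase/master znode first is active, everyone else parks as a backup and watches for that node to vanish. A minimal sketch of the pattern, not ActiveMasterManager's actual code:

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class ActiveStandbySketch {
    // Returns true if this process won the election.
    static boolean tryBecomeActive(ZooKeeper zk, String serverName) throws Exception {
        try {
            // Ephemeral: the znode disappears if the active master's session
            // dies, which is what lets a backup take over.
            zk.create("/hbase/master", serverName.getBytes(),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            return true;
        } catch (KeeperException.NodeExistsException e) {
            // Lost the race: stay visible under /hbase/backup-masters and
            // retry when a NodeDeleted event arrives for /hbase/master.
            zk.create("/hbase/backup-masters/" + serverName, new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            return false;
        }
    }
}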
2024-12-16T18:53:38,347 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a6e4bf70377,36083,1734375217984
2024-12-16T18:53:38,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:53:38,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:53:38,386 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/hbase.id with ID: 9bfc7049-9725-47f5-9209-7c9f26f82cde
2024-12-16T18:53:38,418 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:38,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:53:38,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:53:38,478 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-16T18:53:38,479 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-16T18:53:38,480 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:53:38,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:53:38,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:53:38,511 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store
2024-12-16T18:53:38,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:53:38,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:53:38,553 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:53:38,554 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:53:38,554 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:53:38,554 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
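The master:store descriptor logged twice above defines four column families (info, proc, rs, state); only 'info' deviates from the defaults, with three versions, ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, in-memory priority and 8 KB blocks. A sketch of how such a descriptor is assembled with the public HBase 2.x builder API (only the 'info' family shown); this illustrates the builders, it is not the code the master runs.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    static TableDescriptor masterStore() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
                .build())
            .build();
    }
}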
2024-12-16T18:53:38,554 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:53:38,554 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:53:38,554 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:53:38,554 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:53:38,556 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/.initializing
2024-12-16T18:53:38,556 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984
2024-12-16T18:53:38,570 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C36083%2C1734375217984, suffix=, logDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984, archiveDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/oldWALs, maxLogs=10
2024-12-16T18:53:38,574 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:53:38,609 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:53:38,609 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36383:36383),(127.0.0.1/127.0.0.1:38779:38779)]
2024-12-16T18:53:38,609 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:53:38,609 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:53:38,610 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,610 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,614 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-16T18:53:38,618 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:38,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:53:38,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-16T18:53:38,621 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:38,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:53:38,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-16T18:53:38,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:38,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:53:38,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-16T18:53:38,634 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:38,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:53:38,637 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,638 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,642 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-16T18:53:38,644 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:53:38,655 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:53:38,656 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739797, jitterRate=-0.05930030345916748}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-16T18:53:38,657 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:53:38,658 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-16T18:53:38,665 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63617228, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:53:38,669 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-16T18:53:38,669 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-16T18:53:38,669 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-16T18:53:38,669 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-16T18:53:38,670 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-16T18:53:38,670 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-16T18:53:38,670 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-16T18:53:38,678 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
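The FlushLargeStoresPolicy fallback logged above is directly checkable from the surrounding entries: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the lower bound defaults to the region's memstore flush size divided by its number of column families. For master:store, which has four families (info, proc, rs, state):

    134217728 bytes (flushSize) / 4 families = 33554432 bytes = 32 MB

which matches both the "(32.0 M)" in the policy line and the flushSizeLowerBound=33554432 reported when the region opens.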
2024-12-16T18:53:38,687 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-16T18:53:38,697 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-16T18:53:38,697 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-16T18:53:38,700 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-16T18:53:38,711 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-16T18:53:38,712 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-16T18:53:38,717 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-16T18:53:38,728 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-16T18:53:38,730 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-16T18:53:38,736 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-16T18:53:38,738 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-16T18:53:38,744 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-16T18:53:38,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:53:38,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:53:38,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,755 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7a6e4bf70377,36083,1734375217984, sessionid=0x100e45398430000, setting cluster-up flag (Was=false)
2024-12-16T18:53:38,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,828 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-16T18:53:38,834 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,36083,1734375217984
2024-12-16T18:53:38,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:38,886 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-16T18:53:38,887 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,36083,1734375217984
2024-12-16T18:53:38,891 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-16T18:53:38,891 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-16T18:53:38,891 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
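The repeated ZKUtil(444)/RecoverableZooKeeper(215) pairs above show the master probing optional znodes (/hbase/balancer, /hbase/normalizer, the /hbase/switch/* flags, /hbase/snapshot-cleanup) and treating absence as "use the default", which is why the messages stress "not necessarily an error". A minimal sketch of that probe-then-default pattern with the plain ZooKeeper client (the helper class and method are illustrative, not HBase code):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    class OptionalZNodeReader {
        // Returns the znode payload, or null when the node is absent, which is
        // a normal condition on a fresh cluster, mirroring the log lines above.
        static byte[] readOptional(ZooKeeper zk, String path)
                throws KeeperException, InterruptedException {
            Stat stat = zk.exists(path, false); // no watch; null Stat == no node
            return stat == null ? null : zk.getData(path, false, stat);
        }
    }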
2024-12-16T18:53:38,891 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a6e4bf70377,36083,1734375217984 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-16T18:53:38,891 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:53:38,891 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:53:38,891 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:53:38,892 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:53:38,892 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a6e4bf70377:0, corePoolSize=10, maxPoolSize=10
2024-12-16T18:53:38,892 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:38,892 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:53:38,892 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:38,899 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:53:38,899 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-16T18:53:38,901 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:38,902 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:53:38,921 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734375248921
2024-12-16T18:53:38,921 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-16T18:53:38,922 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-16T18:53:38,922 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-16T18:53:38,922 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-16T18:53:38,922 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-16T18:53:38,922 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-16T18:53:38,934 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
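The FSTableDescriptors(133) entry above prints the full hbase:meta descriptor; the same {NAME => ...} attribute blocks are what the client-side builders emit for any table. As a hedged illustration only (hbase:meta itself is created internally, not through this path), the 'info' family as logged (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) corresponds roughly to these HBase 2.x client calls:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    class DescriptorSketch {
        // Builds a descriptor whose 'info' family mirrors the attributes in the log.
        static TableDescriptor example() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example")) // hypothetical table name
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build())
                .build();
        }
    }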
2024-12-16T18:53:38,938 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-16T18:53:38,938 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-16T18:53:38,939 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-16T18:53:38,939 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-16T18:53:38,939 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-16T18:53:38,941 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a6e4bf70377:45905
2024-12-16T18:53:38,942 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1008): ClusterId : 9bfc7049-9725-47f5-9209-7c9f26f82cde
2024-12-16T18:53:38,943 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-16T18:53:38,947 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375218939,5,FailOnTimeoutGroup]
2024-12-16T18:53:38,947 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375218947,5,FailOnTimeoutGroup]
2024-12-16T18:53:38,947 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:38,947 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-16T18:53:38,947 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:38,947 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
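The CleanerChore(192) lines show which cleaner delegates were installed for the log and hfile cleaner pools. Those lists are pluggable; to the best of my knowledge they come from the comma-separated properties hbase.master.logcleaner.plugins and hbase.master.hfilecleaner.plugins (worth verifying against your version's hbase-default.xml). A hedged configuration sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class CleanerPluginSketch {
        static Configuration conf() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property keys; the values name two of the cleaners from the log.
            conf.set("hbase.master.logcleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
            conf.set("hbase.master.hfilecleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
            return conf;
        }
    }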
2024-12-16T18:53:38,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:53:38,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:53:38,957 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-16T18:53:38,957 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-16T18:53:38,958 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-16T18:53:38,958 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20
2024-12-16T18:53:38,970 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-16T18:53:38,971 DEBUG [RS:0;7a6e4bf70377:45905 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24c25c5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:53:38,986 DEBUG [RS:0;7a6e4bf70377:45905 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76dbc824, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:53:38,986 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-16T18:53:38,986 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-16T18:53:38,986 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-16T18:53:38,987 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,36083,1734375217984 with isa=7a6e4bf70377/172.17.0.2:45905, startcode=1734375218213
2024-12-16T18:53:38,987 DEBUG [RS:0;7a6e4bf70377:45905 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:53:38,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:53:38,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:53:39,000 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51071, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:53:39,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:53:39,001 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,001 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,003 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20
2024-12-16T18:53:39,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:53:39,003 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:35231
2024-12-16T18:53:39,003 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-16T18:53:39,007 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:53:39,007 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:39,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:53:39,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:53:39,010 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:53:39,010 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:39,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:53:39,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:53:39,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:53:39,012 DEBUG [RS:0;7a6e4bf70377:45905 {}] zookeeper.ZKUtil(111): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,012 WARN [RS:0;7a6e4bf70377:45905 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
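Each CompactionConfiguration(181) line above prints the same cluster-wide defaults once per column family: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0. Assuming the standard property names (worth confirming for your release), those figures map onto configuration keys roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class CompactionTuningSketch {
        static Configuration conf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            return conf;
        }
    }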
2024-12-16T18:53:39,012 INFO [RS:0;7a6e4bf70377:45905 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:53:39,012 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,012 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,45905,1734375218213]
2024-12-16T18:53:39,014 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:53:39,014 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:39,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:53:39,015 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/meta/1588230740
2024-12-16T18:53:39,016 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/meta/1588230740
2024-12-16T18:53:39,018 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
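The property the FlushLargeStoresPolicy line says is unset, hbase.hregion.percolumnfamilyflush.size.lower.bound, can also be pinned explicitly instead of relying on the flush-size/number-of-families fallback (16 MB here for hbase:meta). A minimal sketch, with the 16 MB value chosen only to match the fallback in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class PerFamilyFlushBoundSketch {
        static Configuration conf() {
            Configuration conf = HBaseConfiguration.create();
            // Property name taken verbatim from the log line above.
            conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                16L * 1024 * 1024); // 16 MB
            return conf;
        }
    }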
2024-12-16T18:53:39,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:53:39,032 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-16T18:53:39,032 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-16T18:53:39,033 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:53:39,034 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824387, jitterRate=0.048263102769851685}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:53:39,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:53:39,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:53:39,036 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:53:39,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:53:39,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:53:39,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:53:39,036 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-16T18:53:39,036 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:53:39,036 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:53:39,037 INFO [RS:0;7a6e4bf70377:45905 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-16T18:53:39,037 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,038 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-16T18:53:39,038 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:53:39,038 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-16T18:53:39,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-16T18:53:39,040 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,040 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,041 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,041 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,041 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-16T18:53:39,041 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,041 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,041 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:53:39,041 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,041 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,041 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,042 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,042 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:39,042 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:53:39,042 DEBUG [RS:0;7a6e4bf70377:45905 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:53:39,042 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-16T18:53:39,066 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,066 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,066 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,066 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,066 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45905,1734375218213-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T18:53:39,093 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-16T18:53:39,093 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45905,1734375218213-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,112 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.Replication(204): 7a6e4bf70377,45905,1734375218213 started
2024-12-16T18:53:39,113 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,45905,1734375218213, RpcServer on 7a6e4bf70377/172.17.0.2:45905, sessionid=0x100e45398430001
2024-12-16T18:53:39,113 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-16T18:53:39,113 DEBUG [RS:0;7a6e4bf70377:45905 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,113 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,45905,1734375218213'
2024-12-16T18:53:39,113 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-16T18:53:39,114 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-16T18:53:39,114 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-16T18:53:39,114 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-16T18:53:39,114 DEBUG [RS:0;7a6e4bf70377:45905 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,114 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,45905,1734375218213'
2024-12-16T18:53:39,114 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-16T18:53:39,115 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-16T18:53:39,115 DEBUG [RS:0;7a6e4bf70377:45905 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-16T18:53:39,115 INFO [RS:0;7a6e4bf70377:45905 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-16T18:53:39,115 INFO [RS:0;7a6e4bf70377:45905 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-16T18:53:39,192 WARN [7a6e4bf70377:36083 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-16T18:53:39,219 INFO [RS:0;7a6e4bf70377:45905 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C45905%2C1734375218213, suffix=, logDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213, archiveDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/oldWALs, maxLogs=32
2024-12-16T18:53:39,222 INFO [RS:0;7a6e4bf70377:45905 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:53:39,236 INFO [RS:0;7a6e4bf70377:45905 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:53:39,236 DEBUG [RS:0;7a6e4bf70377:45905 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38779:38779),(127.0.0.1/127.0.0.1:36383:36383)]
2024-12-16T18:53:39,443 DEBUG [7a6e4bf70377:36083 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-16T18:53:39,443 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,445 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,45905,1734375218213, state=OPENING
2024-12-16T18:53:39,486 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-16T18:53:39,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:39,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:39,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7a6e4bf70377,45905,1734375218213}]
2024-12-16T18:53:39,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:53:39,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:53:39,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:53:39,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:53:39,649 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,649 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-16T18:53:39,653 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-16T18:53:39,663 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-16T18:53:39,663 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:53:39,666 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C45905%2C1734375218213.meta, suffix=.meta, logDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213, archiveDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/oldWALs, maxLogs=32
2024-12-16T18:53:39,669 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:53:39,688 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:53:39,688 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38779:38779),(127.0.0.1/127.0.0.1:36383:36383)]
2024-12-16T18:53:39,688 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:53:39,688 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-16T18:53:39,689 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-16T18:53:39,689 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-16T18:53:39,689 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-16T18:53:39,689 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:53:39,689 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-16T18:53:39,689 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-16T18:53:39,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:53:39,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:53:39,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:39,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:53:39,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:53:39,696 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:53:39,696 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:39,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:53:39,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:53:39,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:53:39,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:39,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:53:39,701 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/meta/1588230740
2024-12-16T18:53:39,703 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/meta/1588230740
2024-12-16T18:53:39,705 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T18:53:39,708 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:53:39,709 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740860, jitterRate=-0.05794800817966461}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:53:39,710 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:53:39,711 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734375219648
2024-12-16T18:53:39,715 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-16T18:53:39,715 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-16T18:53:39,716 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:39,717 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,45905,1734375218213, state=OPEN
2024-12-16T18:53:39,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:53:39,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:53:39,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:53:39,744 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:53:39,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-16T18:53:39,748 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7a6e4bf70377,45905,1734375218213 in 249 msec
2024-12-16T18:53:39,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-16T18:53:39,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 709 msec
2024-12-16T18:53:39,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 862 msec
2024-12-16T18:53:39,753 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734375219753, completionTime=-1
2024-12-16T18:53:39,753 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-16T18:53:39,753 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-16T18:53:39,754 DEBUG [hconnection-0x32f33d35-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:53:39,756 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36064, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:53:39,757 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-16T18:53:39,757 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734375279757
2024-12-16T18:53:39,757 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734375339757
2024-12-16T18:53:39,757 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec
2024-12-16T18:53:39,778 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36083,1734375217984-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,778 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36083,1734375217984-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,778 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36083,1734375217984-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,779 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a6e4bf70377:36083, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,779 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:39,779 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-16T18:53:39,779 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:53:39,781 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-16T18:53:39,786 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-16T18:53:39,788 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-16T18:53:39,788 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:39,790 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-16T18:53:39,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:53:39,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:53:39,807 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0d6a1a3a8642b152dd163e8eaa56f05b, NAME => 'hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20
2024-12-16T18:53:39,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:53:39,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:53:39,820 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:53:39,821 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 0d6a1a3a8642b152dd163e8eaa56f05b, disabling compactions & flushes
2024-12-16T18:53:39,821 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:53:39,821 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:53:39,821 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b. after waiting 0 ms
2024-12-16T18:53:39,821 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:53:39,821 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:53:39,821 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0d6a1a3a8642b152dd163e8eaa56f05b:
2024-12-16T18:53:39,823 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-16T18:53:39,823 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734375219823"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375219823"}]},"ts":"1734375219823"}
2024-12-16T18:53:39,826 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-16T18:53:39,827 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-16T18:53:39,828 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375219827"}]},"ts":"1734375219827"}
2024-12-16T18:53:39,830 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-16T18:53:39,870 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=0d6a1a3a8642b152dd163e8eaa56f05b, ASSIGN}]
2024-12-16T18:53:39,871 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=0d6a1a3a8642b152dd163e8eaa56f05b, ASSIGN
2024-12-16T18:53:39,873 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=0d6a1a3a8642b152dd163e8eaa56f05b, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45905,1734375218213; forceNewPlan=false, retain=false
2024-12-16T18:53:40,023 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=0d6a1a3a8642b152dd163e8eaa56f05b, regionState=OPENING, regionLocation=7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:40,026 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 0d6a1a3a8642b152dd163e8eaa56f05b, server=7a6e4bf70377,45905,1734375218213}]
2024-12-16T18:53:40,108 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-16T18:53:40,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:53:40,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:53:40,180 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:40,186 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:53:40,187 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 0d6a1a3a8642b152dd163e8eaa56f05b, NAME => 'hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:53:40,187 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 0d6a1a3a8642b152dd163e8eaa56f05b
2024-12-16T18:53:40,187 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:53:40,187 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 0d6a1a3a8642b152dd163e8eaa56f05b
2024-12-16T18:53:40,188 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 0d6a1a3a8642b152dd163e8eaa56f05b
2024-12-16T18:53:40,189 INFO [StoreOpener-0d6a1a3a8642b152dd163e8eaa56f05b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0d6a1a3a8642b152dd163e8eaa56f05b
2024-12-16T18:53:40,192 INFO [StoreOpener-0d6a1a3a8642b152dd163e8eaa56f05b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d6a1a3a8642b152dd163e8eaa56f05b columnFamilyName info
2024-12-16T18:53:40,192 DEBUG [StoreOpener-0d6a1a3a8642b152dd163e8eaa56f05b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:40,193 INFO [StoreOpener-0d6a1a3a8642b152dd163e8eaa56f05b-1 {}] regionserver.HStore(327): Store=0d6a1a3a8642b152dd163e8eaa56f05b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:53:40,194 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b
2024-12-16T18:53:40,195 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b
2024-12-16T18:53:40,197 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 0d6a1a3a8642b152dd163e8eaa56f05b
2024-12-16T18:53:40,200 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:53:40,200 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 0d6a1a3a8642b152dd163e8eaa56f05b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860122, jitterRate=0.09370294213294983}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-16T18:53:40,201 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 0d6a1a3a8642b152dd163e8eaa56f05b:
2024-12-16T18:53:40,202 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b., pid=6, masterSystemTime=1734375220180
2024-12-16T18:53:40,204 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:53:40,205 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:53:40,205 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=0d6a1a3a8642b152dd163e8eaa56f05b, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:40,211 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-16T18:53:40,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 0d6a1a3a8642b152dd163e8eaa56f05b, server=7a6e4bf70377,45905,1734375218213 in 182 msec
2024-12-16T18:53:40,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-16T18:53:40,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=0d6a1a3a8642b152dd163e8eaa56f05b, ASSIGN in 342 msec
2024-12-16T18:53:40,217 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-16T18:53:40,217 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375220217"}]},"ts":"1734375220217"}
2024-12-16T18:53:40,219 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-16T18:53:40,229 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-16T18:53:40,232 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 450 msec
2024-12-16T18:53:40,290 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-16T18:53:40,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:53:40,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:40,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:53:40,302 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-16T18:53:40,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:53:40,340 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 36 msec
2024-12-16T18:53:40,345 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-16T18:53:40,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:53:40,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 26 msec
2024-12-16T18:53:40,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-16T18:53:40,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-16T18:53:40,411 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.064sec
2024-12-16T18:53:40,411 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-16T18:53:40,411 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-16T18:53:40,412 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-16T18:53:40,412 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-16T18:53:40,412 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-16T18:53:40,412 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36083,1734375217984-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T18:53:40,412 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36083,1734375217984-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-16T18:53:40,415 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-16T18:53:40,415 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-16T18:53:40,416 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36083,1734375217984-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:40,424 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bafdd00 to 127.0.0.1:53238 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63d1b8ea
2024-12-16T18:53:40,437 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@125ff5a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:53:40,446 DEBUG [hconnection-0x4707bca2-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:53:40,449 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:53:40,451 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7a6e4bf70377,36083,1734375217984
2024-12-16T18:53:40,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:40,456 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-16T18:53:40,473 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:53:40,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:40,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:40,473 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:53:40,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:53:40,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:53:40,474 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:53:40,474 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:53:40,475 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33197
2024-12-16T18:53:40,475 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T18:53:40,478 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-16T18:53:40,479 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:40,482 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:53:40,487 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33197 connecting to ZooKeeper ensemble=127.0.0.1:53238
2024-12-16T18:53:40,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331970x0, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:53:40,506 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:331970x0, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:53:40,507 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:331970x0, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on existing znode=/hbase/running
2024-12-16T18:53:40,508 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:331970x0, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:53:40,509 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33197-0x100e45398430003 connected
2024-12-16T18:53:40,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33197
2024-12-16T18:53:40,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33197
2024-12-16T18:53:40,530 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33197
2024-12-16T18:53:40,530 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33197
2024-12-16T18:53:40,531 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33197
2024-12-16T18:53:40,531 DEBUG [pool-282-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT
2024-12-16T18:53:40,545 DEBUG [RS:1;7a6e4bf70377:33197 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7a6e4bf70377:33197
2024-12-16T18:53:40,546 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1008): ClusterId : 9bfc7049-9725-47f5-9209-7c9f26f82cde
2024-12-16T18:53:40,546 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-16T18:53:40,553 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-16T18:53:40,553 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-16T18:53:40,562 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-16T18:53:40,562 DEBUG [RS:1;7a6e4bf70377:33197 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3381760b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:53:40,563 DEBUG [RS:1;7a6e4bf70377:33197 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c099075, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:53:40,563 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-16T18:53:40,563 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-16T18:53:40,563 DEBUG [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-16T18:53:40,564 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,36083,1734375217984 with isa=7a6e4bf70377/172.17.0.2:33197, startcode=1734375220472
2024-12-16T18:53:40,564 DEBUG [RS:1;7a6e4bf70377:33197 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:53:40,568 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60729, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:53:40,568 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,33197,1734375220472
2024-12-16T18:53:40,568 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,33197,1734375220472
2024-12-16T18:53:40,571 DEBUG [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20
2024-12-16T18:53:40,571 DEBUG [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:35231
2024-12-16T18:53:40,571 DEBUG [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-16T18:53:40,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:53:40,578 DEBUG [RS:1;7a6e4bf70377:33197 {}] zookeeper.ZKUtil(111): regionserver:33197-0x100e45398430003, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,33197,1734375220472
2024-12-16T18:53:40,578 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,33197,1734375220472]
2024-12-16T18:53:40,578 WARN [RS:1;7a6e4bf70377:33197 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-16T18:53:40,579 INFO [RS:1;7a6e4bf70377:33197 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:53:40,579 DEBUG [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,33197,1734375220472
2024-12-16T18:53:40,584 DEBUG [RS:1;7a6e4bf70377:33197 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-16T18:53:40,584 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-16T18:53:40,587 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-16T18:53:40,588 INFO [RS:1;7a6e4bf70377:33197 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-16T18:53:40,588 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:40,588 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-16T18:53:40,589 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:40,589 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:53:40,590 DEBUG [RS:1;7a6e4bf70377:33197 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:53:40,591 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:40,591 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:40,591 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:40,591 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:40,591 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,33197,1734375220472-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T18:53:40,610 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-16T18:53:40,610 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,33197,1734375220472-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:53:40,628 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.Replication(204): 7a6e4bf70377,33197,1734375220472 started
2024-12-16T18:53:40,628 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,33197,1734375220472, RpcServer on 7a6e4bf70377/172.17.0.2:33197, sessionid=0x100e45398430003
2024-12-16T18:53:40,629 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3355): Started new server=Thread[RS:1;7a6e4bf70377:33197,5,FailOnTimeoutGroup]
2024-12-16T18:53:40,629 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-16T18:53:40,629 DEBUG [RS:1;7a6e4bf70377:33197 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,33197,1734375220472
2024-12-16T18:53:40,629 INFO [Time-limited test {}] wal.TestLogRolling(191): Replication=2
2024-12-16T18:53:40,629 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,33197,1734375220472'
2024-12-16T18:53:40,629 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-16T18:53:40,630 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-16T18:53:40,630 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-16T18:53:40,630 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-16T18:53:40,630 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-16T18:53:40,630 DEBUG [RS:1;7a6e4bf70377:33197 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,33197,1734375220472
2024-12-16T18:53:40,630 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,33197,1734375220472'
2024-12-16T18:53:40,630 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-16T18:53:40,633 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-16T18:53:40,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-16T18:53:40,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-16T18:53:40,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T18:53:40,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-16T18:53:40,638 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-16T18:53:40,638 DEBUG [RS:1;7a6e4bf70377:33197 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-16T18:53:40,639 INFO [RS:1;7a6e4bf70377:33197 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-16T18:53:40,639 INFO [RS:1;7a6e4bf70377:33197 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-16T18:53:40,639 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T18:53:40,639 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:53:40,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 9 2024-12-16T18:53:40,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T18:53:40,642 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T18:53:40,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741837_1013 (size=393) 2024-12-16T18:53:40,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741837_1013 (size=393) 2024-12-16T18:53:40,652 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f41842edafcf2308940ff2b22b3c218f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20 2024-12-16T18:53:40,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741838_1014 (size=76) 2024-12-16T18:53:40,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46877 is added to blk_1073741838_1014 (size=76) 2024-12-16T18:53:40,666 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:53:40,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1681): Closing f41842edafcf2308940ff2b22b3c218f, disabling compactions & flushes 2024-12-16T18:53:40,667 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:53:40,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:53:40,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. after waiting 0 ms 2024-12-16T18:53:40,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:53:40,667 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:53:40,667 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1635): Region close journal for f41842edafcf2308940ff2b22b3c218f: 2024-12-16T18:53:40,668 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T18:53:40,669 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1734375220668"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375220668"}]},"ts":"1734375220668"} 2024-12-16T18:53:40,671 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-16T18:53:40,672 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-16T18:53:40,672 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375220672"}]},"ts":"1734375220672"}
2024-12-16T18:53:40,674 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta
2024-12-16T18:53:40,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f41842edafcf2308940ff2b22b3c218f, ASSIGN}]
2024-12-16T18:53:40,697 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f41842edafcf2308940ff2b22b3c218f, ASSIGN
2024-12-16T18:53:40,698 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f41842edafcf2308940ff2b22b3c218f, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45905,1734375218213; forceNewPlan=false, retain=false
2024-12-16T18:53:40,741 INFO [RS:1;7a6e4bf70377:33197 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C33197%2C1734375220472, suffix=, logDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,33197,1734375220472, archiveDir=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/oldWALs, maxLogs=32
2024-12-16T18:53:40,742 INFO [RS:1;7a6e4bf70377:33197 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C33197%2C1734375220472.1734375220742
2024-12-16T18:53:40,751 INFO [RS:1;7a6e4bf70377:33197 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,33197,1734375220472/7a6e4bf70377%2C33197%2C1734375220472.1734375220742
2024-12-16T18:53:40,751 DEBUG [RS:1;7a6e4bf70377:33197 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38779:38779),(127.0.0.1/127.0.0.1:36383:36383)]
2024-12-16T18:53:40,850 INFO [7a6e4bf70377:36083 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment.
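The WAL configuration line above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) reflects the region server's log-rolling settings: the roll size is the WAL block size scaled by a multiplier, here 0.5. A sketch of the corresponding configuration, assuming the standard HBase property names, with values mirroring the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // rollsize = blocksize * multiplier: 256 MB * 0.5 = 128 MB, as logged above
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32); // maxLogs=32 in the log
        System.out.println(conf.get("hbase.regionserver.hlog.blocksize"));
      }
    }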
2024-12-16T18:53:40,851 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=f41842edafcf2308940ff2b22b3c218f, regionState=OPENING, regionLocation=7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:40,855 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure f41842edafcf2308940ff2b22b3c218f, server=7a6e4bf70377,45905,1734375218213}]
2024-12-16T18:53:41,009 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:41,014 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.
2024-12-16T18:53:41,014 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => f41842edafcf2308940ff2b22b3c218f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:53:41,014 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:53:41,015 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:53:41,015 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:53:41,015 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:53:41,016 INFO [StoreOpener-f41842edafcf2308940ff2b22b3c218f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:53:41,018 INFO [StoreOpener-f41842edafcf2308940ff2b22b3c218f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f41842edafcf2308940ff2b22b3c218f columnFamilyName info
2024-12-16T18:53:41,018 DEBUG [StoreOpener-f41842edafcf2308940ff2b22b3c218f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:53:41,019 INFO [StoreOpener-f41842edafcf2308940ff2b22b3c218f-1 {}] regionserver.HStore(327): Store=f41842edafcf2308940ff2b22b3c218f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:53:41,020 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:53:41,020 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:53:41,023 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:53:41,025 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:53:41,026 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened f41842edafcf2308940ff2b22b3c218f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856020, jitterRate=0.08848690986633301}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-16T18:53:41,027 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for f41842edafcf2308940ff2b22b3c218f:
2024-12-16T18:53:41,028 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f., pid=11, masterSystemTime=1734375221009
2024-12-16T18:53:41,031 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.
2024-12-16T18:53:41,031 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.
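The CompactionConfiguration line above prints the store's effective compaction tuning. Its headline numbers appear to correspond to the usual hbase-site.xml keys; a sketch with the logged values, shown only to decode the log line (the key names are assumptions about which settings produced these values):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio 5.000000
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, 7 days in ms
      }
    }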
2024-12-16T18:53:41,032 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=f41842edafcf2308940ff2b22b3c218f, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45905,1734375218213
2024-12-16T18:53:41,037 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-12-16T18:53:41,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure f41842edafcf2308940ff2b22b3c218f, server=7a6e4bf70377,45905,1734375218213 in 180 msec
2024-12-16T18:53:41,040 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-12-16T18:53:41,040 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f41842edafcf2308940ff2b22b3c218f, ASSIGN in 343 msec
2024-12-16T18:53:41,041 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-16T18:53:41,041 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375221041"}]},"ts":"1734375221041"}
2024-12-16T18:53:41,043 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta
2024-12-16T18:53:41,056 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION
2024-12-16T18:53:41,059 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 421 msec
2024-12-16T18:53:42,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-12-16T18:53:42,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer
2024-12-16T18:53:42,533 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling
2024-12-16T18:53:45,192 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-16T18:53:45,194 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:53:45,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:53:45,217 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-16T18:53:45,218 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-16T18:53:45,218 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath'
2024-12-16T18:53:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-16T18:53:50,645 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath, procId: 9 completed
2024-12-16T18:53:50,652 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath
2024-12-16T18:53:50,652 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.
2024-12-16T18:53:50,667 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:53:50,671 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:53:50,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:53:50,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:53:50,671 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:53:50,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b25f26c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:53:50,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23bd7700{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:53:50,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35781a4d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/java.io.tmpdir/jetty-localhost-33447-hadoop-hdfs-3_4_1-tests_jar-_-any-6715657791669488729/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:53:50,765 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59a37304{HTTP/1.1, (http/1.1)}{localhost:33447}
2024-12-16T18:53:50,765 INFO [Time-limited test {}] server.Server(415): Started @135131ms
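The Jetty 'datanode' web contexts that start here and below (ports 33447, 39495, 36519) indicate new datanodes being added to the mini DFS cluster before the test kills one hosting WAL pipeline replicas; the exact helper calls are not visible in this log. A typical pattern with Hadoop's MiniDFSCluster test harness, offered as an assumed sketch for orientation only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        // Add more datanodes so the write pipeline can recover after a failure...
        cluster.startDataNodes(conf, 3, true, null, null);
        // ...then stop one that holds a pipeline replica to simulate its death.
        MiniDFSCluster.DataNodeProperties dead = cluster.stopDataNode(0);
        cluster.shutdown();
      }
    }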
2024-12-16T18:53:50,766 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:53:50,793 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:53:50,798 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:53:50,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:53:50,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:53:50,799 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:53:50,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2452fdd1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:53:50,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@184837c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:53:50,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f70aee9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/java.io.tmpdir/jetty-localhost-39495-hadoop-hdfs-3_4_1-tests_jar-_-any-10380074447676024981/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:53:50,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7dcb64b4{HTTP/1.1, (http/1.1)}{localhost:39495}
2024-12-16T18:53:50,895 INFO [Time-limited test {}] server.Server(415): Started @135262ms
2024-12-16T18:53:50,897 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:53:50,927 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:53:50,931 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:53:50,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:53:50,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:53:50,932 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T18:53:50,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ae908de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:53:50,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ae54a19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:53:51,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63985dfc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/java.io.tmpdir/jetty-localhost-36519-hadoop-hdfs-3_4_1-tests_jar-_-any-2980769458636577080/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:53:51,029 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c8376fb{HTTP/1.1, (http/1.1)}{localhost:36519}
2024-12-16T18:53:51,029 INFO [Time-limited test {}] server.Server(415): Started @135395ms
2024-12-16T18:53:51,030 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:53:51,863 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:51,863 WARN [Thread-671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:51,880 WARN [Thread-612 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-12-16T18:53:51,883 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9561f8d5656f6587 with lease ID 0x6104860f8492c420: Processing first storage report for DS-ed845963-89e6-4f95-8962-4ed4be9648ab from datanode DatanodeRegistration(127.0.0.1:44075, datanodeUuid=f611238f-24f5-4831-a793-a1f9f560c0a4, infoPort=33917, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:51,883 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9561f8d5656f6587 with lease ID 0x6104860f8492c420: from storage DS-ed845963-89e6-4f95-8962-4ed4be9648ab node DatanodeRegistration(127.0.0.1:44075, datanodeUuid=f611238f-24f5-4831-a793-a1f9f560c0a4, infoPort=33917, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:51,883 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9561f8d5656f6587 with lease ID 0x6104860f8492c420: Processing first storage report for DS-698208f6-fce9-45bf-a0f1-d34e6a56dd84 from datanode DatanodeRegistration(127.0.0.1:44075, datanodeUuid=f611238f-24f5-4831-a793-a1f9f560c0a4, infoPort=33917, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:51,883 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9561f8d5656f6587 with lease ID 0x6104860f8492c420: from storage DS-698208f6-fce9-45bf-a0f1-d34e6a56dd84 node DatanodeRegistration(127.0.0.1:44075, datanodeUuid=f611238f-24f5-4831-a793-a1f9f560c0a4, infoPort=33917, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:52,010 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data7/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:52,010 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data8/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:52,032 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-12-16T18:53:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc00d47a02b16b27 with lease ID 0x6104860f8492c421: Processing first storage report for DS-d95bbcce-411c-4c82-a8c1-6daedaa528db from datanode DatanodeRegistration(127.0.0.1:43651, datanodeUuid=5a5bfcd6-5abc-4caf-bc91-60ea23deaf8f, infoPort=38295, infoSecurePort=0, ipcPort=44311, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc00d47a02b16b27 with lease ID 0x6104860f8492c421: from storage DS-d95bbcce-411c-4c82-a8c1-6daedaa528db node DatanodeRegistration(127.0.0.1:43651, datanodeUuid=5a5bfcd6-5abc-4caf-bc91-60ea23deaf8f, infoPort=38295, infoSecurePort=0, ipcPort=44311, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc00d47a02b16b27 with lease ID 0x6104860f8492c421: Processing first storage report for DS-f61d6175-3508-4aab-a39c-d5fedb7af2ed from datanode DatanodeRegistration(127.0.0.1:43651, datanodeUuid=5a5bfcd6-5abc-4caf-bc91-60ea23deaf8f, infoPort=38295, infoSecurePort=0, ipcPort=44311, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc00d47a02b16b27 with lease ID 0x6104860f8492c421: from storage DS-f61d6175-3508-4aab-a39c-d5fedb7af2ed node DatanodeRegistration(127.0.0.1:43651, datanodeUuid=5a5bfcd6-5abc-4caf-bc91-60ea23deaf8f, infoPort=38295, infoSecurePort=0, ipcPort=44311, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:52,107 WARN [Thread-693 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data9/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:52,107 WARN [Thread-694 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data10/current/BP-919602402-172.17.0.2-1734375215671/current, will proceed with Du for space computation calculation,
2024-12-16T18:53:52,136 WARN [Thread-656 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-12-16T18:53:52,139 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3e0dcff4f28dd3c with lease ID 0x6104860f8492c422: Processing first storage report for DS-ecd28431-5389-4f45-9662-429b684634d8 from datanode DatanodeRegistration(127.0.0.1:34275, datanodeUuid=97814e61-7e38-4310-9cb2-bdb2425a06e1, infoPort=38243, infoSecurePort=0, ipcPort=45579, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:52,139 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3e0dcff4f28dd3c with lease ID 0x6104860f8492c422: from storage DS-ecd28431-5389-4f45-9662-429b684634d8 node DatanodeRegistration(127.0.0.1:34275, datanodeUuid=97814e61-7e38-4310-9cb2-bdb2425a06e1, infoPort=38243, infoSecurePort=0, ipcPort=45579, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:52,139 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3e0dcff4f28dd3c with lease ID 0x6104860f8492c422: Processing first storage report for DS-21e8b8e2-dfa2-44ab-a9e9-1af25427e96c from datanode DatanodeRegistration(127.0.0.1:34275, datanodeUuid=97814e61-7e38-4310-9cb2-bdb2425a06e1, infoPort=38243, infoSecurePort=0, ipcPort=45579, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671)
2024-12-16T18:53:52,139 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3e0dcff4f28dd3c with lease ID 0x6104860f8492c422: from storage DS-21e8b8e2-dfa2-44ab-a9e9-1af25427e96c node DatanodeRegistration(127.0.0.1:34275, datanodeUuid=97814e61-7e38-4310-9cb2-bdb2425a06e1, infoPort=38243, infoSecurePort=0, ipcPort=45579, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:53:52,168 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:53:52,167 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1015
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:53:52,167 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:53:52,167 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:53:52,172 WARN [DataStreamer for file /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta block BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK], DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad.
2024-12-16T18:53:52,172 WARN [DataStreamer for file /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,33197,1734375220472/7a6e4bf70377%2C33197%2C1734375220472.1734375220742 block BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1015 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK], DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad.
2024-12-16T18:53:52,172 WARN [DataStreamer for file /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 block BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK], DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad.
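The 'Error Recovery ... datanode 0(...) is bad' lines above show the expected client-side reaction to the killed datanode: DataStreamer marks the failed replica, excludes it, and rebuilds the write pipeline with the remaining datanode(s). Whether the client additionally requests a replacement datanode is governed by the replace-datanode-on-failure client keys; a sketch of a write that would exercise this path (paths and values are illustrative, not from the test):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PipelineRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35231"), conf);
             FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"), (short) 2)) {
          out.writeBytes("probe");
          out.hflush(); // a dead pipeline datanode surfaces here as recovery WARNs like the ones above
        }
      }
    }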
2024-12-16T18:53:52,172 WARN [DataStreamer for file /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 block BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK], DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad.
2024-12-16T18:53:52,174 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1501341249_22 at /127.0.0.1:44712 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:44267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44712 dst: /127.0.0.1:44267
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:35298 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35298 dst: /127.0.0.1:46877
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1501341249_22 at /127.0.0.1:35346 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:46877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35346 dst: /127.0.0.1:46877
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,174 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:44656 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44656 dst: /127.0.0.1:44267
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,174 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:35296 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35296 dst: /127.0.0.1:46877
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,174 WARN [PacketResponder: BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46877] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,174 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:44652 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44652 dst: /127.0.0.1:44267
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,175 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:35260 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35260 dst: /127.0.0.1:46877
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,175 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:44616 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44616 dst: /127.0.0.1:44267
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:53:52,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41690fa2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:53:52,210 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e758761{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:53:52,210 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:53:52,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4496df05{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:53:52,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7920de13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,STOPPED}
2024-12-16T18:53:52,212 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:53:52,212 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:53:52,212 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-919602402-172.17.0.2-1734375215671 (Datanode Uuid dde711e2-a90a-4a26-a874-2990693d37f9) service to localhost/127.0.0.1:35231
2024-12-16T18:53:52,212 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:53:52,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data3/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:53:52,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data4/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:53:52,213 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:53:52,213 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1016
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:53:52,213 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1019
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:53:52,214 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:53:52,214 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:57730 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57730 dst: /127.0.0.1:44267 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:53:52,214 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:57728 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57728 dst: /127.0.0.1:44267 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:53:52,213 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:53:52,215 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1501341249_22 at /127.0.0.1:57726 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:44267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57726 dst: /127.0.0.1:44267 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:53:52,218 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:57732 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57732 dst: /127.0.0.1:44267 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:53:52,224 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aa16c5a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:53:52,224 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ae2b76d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T18:53:52,224 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T18:53:52,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@410ad4c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T18:53:52,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@767293e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,STOPPED} 2024-12-16T18:53:52,226 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T18:53:52,226 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-16T18:53:52,226 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-919602402-172.17.0.2-1734375215671 (Datanode Uuid 5a41e41c-25c9-4eca-a408-66279dbfb687) service to localhost/127.0.0.1:35231 2024-12-16T18:53:52,226 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T18:53:52,227 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data1/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:53:52,227 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data2/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:53:52,227 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T18:53:52,232 WARN [RS:0;7a6e4bf70377:45905.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=4, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:53:52,232 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C45905%2C1734375218213:(num 1734375219221) roll requested 2024-12-16T18:53:52,233 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C45905%2C1734375218213.1734375232233 2024-12-16T18:53:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45905 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-16T18:53:52,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45905 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36080 deadline: 1734375242231, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-16T18:53:52,257 WARN [regionserver/7a6e4bf70377:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-16T18:53:52,257 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 with entries=4, filesize=959 B; new WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375232233 2024-12-16T18:53:52,257 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38295:38295),(127.0.0.1/127.0.0.1:38243:38243)] 2024-12-16T18:53:52,257 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time 2024-12-16T18:53:52,257 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:53:52,258 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:53:52,259 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-16T18:53:52,259 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-16T18:53:52,259 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 2024-12-16T18:53:52,262 WARN [IPC Server handler 3 on default port 35231 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741833_1019 2024-12-16T18:53:52,264 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 after 5ms 2024-12-16T18:53:56,265 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 after 4006ms 2024-12-16T18:54:04,336 INFO [Time-limited test {}] wal.TestLogRolling(243): log.getCurrentFileName(): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375232233 2024-12-16T18:54:04,338 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741840_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:54:04,339 WARN [DataStreamer for file /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375232233 block BP-919602402-172.17.0.2-1734375215671:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK], DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]) is bad. 
2024-12-16T18:54:04,340 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:35908 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:43651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35908 dst: /127.0.0.1:43651 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:54:04,340 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:58598 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:34275:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58598 dst: /127.0.0.1:34275 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:54:04,355 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f70aee9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:54:04,355 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7dcb64b4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T18:54:04,355 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T18:54:04,356 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@184837c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T18:54:04,356 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2452fdd1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,STOPPED} 2024-12-16T18:54:04,358 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T18:54:04,358 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-16T18:54:04,358 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T18:54:04,358 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-919602402-172.17.0.2-1734375215671 (Datanode Uuid 5a5bfcd6-5abc-4caf-bc91-60ea23deaf8f) service to localhost/127.0.0.1:35231 2024-12-16T18:54:04,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data7/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:54:04,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data8/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:54:04,360 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T18:54:04,363 WARN [sync.1 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]] 2024-12-16T18:54:04,363 WARN [sync.1 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]] 2024-12-16T18:54:04,363 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C45905%2C1734375218213:(num 1734375232233) roll requested 2024-12-16T18:54:04,364 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C45905%2C1734375218213.1734375244363 2024-12-16T18:54:04,367 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-16T18:54:04,367 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK], DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]) is bad. 2024-12-16T18:54:04,367 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741841_1023 2024-12-16T18:54:04,370 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK] 2024-12-16T18:54:04,375 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:54:04,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:32866 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6]'}, localName='127.0.0.1:44075', datanodeUuid='f611238f-24f5-4831-a793-a1f9f560c0a4', xmitsInProgress=0}:Exception transferring block BP-919602402-172.17.0.2-1734375215671:blk_1073741842_1024 to mirror 127.0.0.1:46877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T18:54:04,375 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK], DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad. 2024-12-16T18:54:04,375 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741842_1024 2024-12-16T18:54:04,375 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:32866 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-16T18:54:04,375 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:32866 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32866 dst: /127.0.0.1:44075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:54:04,376 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK] 2024-12-16T18:54:04,377 WARN [Thread-725 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-16T18:54:04,378 WARN [Thread-725 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK], DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]) is bad. 2024-12-16T18:54:04,378 WARN [Thread-725 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741843_1025 2024-12-16T18:54:04,378 WARN [Thread-725 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK] 2024-12-16T18:54:04,384 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375232233 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375244363 2024-12-16T18:54:04,385 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38243:38243),(127.0.0.1/127.0.0.1:33917:33917)] 2024-12-16T18:54:04,385 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time 2024-12-16T18:54:04,385 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375232233 is not closed yet, will try archiving it next time 2024-12-16T18:54:04,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34275 is added to blk_1073741840_1022 (size=2431) 2024-12-16T18:54:04,788 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time 2024-12-16T18:54:07,154 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4a75ac79[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34275, datanodeUuid=97814e61-7e38-4310-9cb2-bdb2425a06e1, infoPort=38243, infoSecurePort=0, ipcPort=45579, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741840_1022 to 127.0.0.1:46877 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:54:07,537 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T18:54:08,367 WARN [ResponseProcessor for block BP-919602402-172.17.0.2-1734375215671:blk_1073741844_1026 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-919602402-172.17.0.2-1734375215671:blk_1073741844_1026 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:54:08,368 WARN [DataStreamer for file /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375244363 block BP-919602402-172.17.0.2-1734375215671:blk_1073741844_1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741844_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK], DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]) is bad. 2024-12-16T18:54:08,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:40726 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:34275:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40726 dst: /127.0.0.1:34275 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:54:08,370 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:32880 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:44075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32880 dst: /127.0.0.1:44075 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T18:54:08,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63985dfc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:54:08,373 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c8376fb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T18:54:08,373 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T18:54:08,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ae54a19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T18:54:08,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ae908de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,STOPPED} 2024-12-16T18:54:08,375 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T18:54:08,375 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-16T18:54:08,375 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-919602402-172.17.0.2-1734375215671 (Datanode Uuid 97814e61-7e38-4310-9cb2-bdb2425a06e1) service to localhost/127.0.0.1:35231 2024-12-16T18:54:08,375 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T18:54:08,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data9/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:54:08,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data10/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:54:08,376 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T18:54:08,378 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]] 2024-12-16T18:54:08,378 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]] 2024-12-16T18:54:08,379 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C45905%2C1734375218213:(num 1734375244363) roll requested 2024-12-16T18:54:08,379 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C45905%2C1734375218213.1734375248379 2024-12-16T18:54:08,382 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:54:08,382 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK], DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]) is bad. 2024-12-16T18:54:08,382 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741845_1028 2024-12-16T18:54:08,383 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK] 2024-12-16T18:54:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45905 {}] regionserver.HRegion(8581): Flush requested on f41842edafcf2308940ff2b22b3c218f 2024-12-16T18:54:08,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f41842edafcf2308940ff2b22b3c218f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-16T18:54:08,385 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:54:08,385 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK], DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]) is bad. 2024-12-16T18:54:08,385 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741846_1029 2024-12-16T18:54:08,386 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK] 2024-12-16T18:54:08,388 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:54:08,388 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45574 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6]'}, localName='127.0.0.1:44075', datanodeUuid='f611238f-24f5-4831-a793-a1f9f560c0a4', xmitsInProgress=0}:Exception transferring block BP-919602402-172.17.0.2-1734375215671:blk_1073741847_1030 to mirror 127.0.0.1:46877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,389 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK], DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad.
2024-12-16T18:54:08,389 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45574 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-16T18:54:08,389 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741847_1030
2024-12-16T18:54:08,389 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45574 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:44075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45574 dst: /127.0.0.1:44075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,390 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]
2024-12-16T18:54:08,393 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34275
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,392 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45590 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6]'}, localName='127.0.0.1:44075', datanodeUuid='f611238f-24f5-4831-a793-a1f9f560c0a4', xmitsInProgress=0}:Exception transferring block BP-919602402-172.17.0.2-1734375215671:blk_1073741848_1031 to mirror 127.0.0.1:34275
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,393 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK], DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]) is bad.
2024-12-16T18:54:08,393 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45590 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-16T18:54:08,393 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741848_1031
2024-12-16T18:54:08,393 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45590 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45590 dst: /127.0.0.1:44075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,394 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]
2024-12-16T18:54:08,394 WARN [IPC Server handler 2 on default port 35231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-16T18:54:08,395 WARN [IPC Server handler 2 on default port 35231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-16T18:54:08,395 WARN [IPC Server handler 2 on default port 35231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-16T18:54:08,400 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375244363 with entries=13, filesize=14.10 KB; new WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248379
2024-12-16T18:54:08,400 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33917:33917)]
2024-12-16T18:54:08,400 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time
2024-12-16T18:54:08,400 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375244363 is not closed yet, will try archiving it next time
2024-12-16T18:54:08,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741844_1027 (size=14443)
2024-12-16T18:54:08,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/.tmp/info/bd53846a2cc0410fbc015608f66b8da9 is 1080, key is row0002/info:/1734375244361/Put/seqid=0
2024-12-16T18:54:08,406 WARN [Thread-740 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,407 WARN [Thread-740 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK], DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]) is bad.
2024-12-16T18:54:08,407 WARN [Thread-740 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741850_1033
2024-12-16T18:54:08,407 WARN [Thread-740 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]
2024-12-16T18:54:08,409 WARN [Thread-740 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,409 WARN [Thread-740 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK], DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad.
2024-12-16T18:54:08,409 WARN [Thread-740 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741851_1034
2024-12-16T18:54:08,409 WARN [Thread-740 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]
2024-12-16T18:54:08,412 WARN [Thread-740 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34275
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45610 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6]'}, localName='127.0.0.1:44075', datanodeUuid='f611238f-24f5-4831-a793-a1f9f560c0a4', xmitsInProgress=0}:Exception transferring block BP-919602402-172.17.0.2-1734375215671:blk_1073741852_1035 to mirror 127.0.0.1:34275
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,412 WARN [Thread-740 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK], DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]) is bad.
2024-12-16T18:54:08,412 WARN [Thread-740 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741852_1035
2024-12-16T18:54:08,412 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45610 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-16T18:54:08,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45610 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:44075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45610 dst: /127.0.0.1:44075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,413 WARN [Thread-740 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]
2024-12-16T18:54:08,414 WARN [Thread-740 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,414 WARN [Thread-740 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK], DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]) is bad.
2024-12-16T18:54:08,414 WARN [Thread-740 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741853_1036
2024-12-16T18:54:08,414 WARN [Thread-740 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]
2024-12-16T18:54:08,415 WARN [IPC Server handler 1 on default port 35231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-16T18:54:08,415 WARN [IPC Server handler 1 on default port 35231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-16T18:54:08,415 WARN [IPC Server handler 1 on default port 35231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-16T18:54:08,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741854_1037 (size=10347)
2024-12-16T18:54:08,601 WARN [sync.2 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]
2024-12-16T18:54:08,601 WARN [sync.2 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]
2024-12-16T18:54:08,601 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C45905%2C1734375218213:(num 1734375248379) roll requested
2024-12-16T18:54:08,602 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C45905%2C1734375218213.1734375248602
2024-12-16T18:54:08,607 WARN [Thread-749 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,607 WARN [Thread-749 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK], DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]) is bad.
2024-12-16T18:54:08,608 WARN [Thread-749 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741855_1038
2024-12-16T18:54:08,609 WARN [Thread-749 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]
2024-12-16T18:54:08,611 WARN [Thread-749 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,611 WARN [Thread-749 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK], DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]) is bad.
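[Editor's note] The repeated "datanode N ... is bad / Abandoning / Excluding datanode" sequences above are the HDFS client's write-pipeline recovery: each failed datanode is dropped and a new block location is requested. How aggressively the client insists on replacing a dead pipeline member is controlled by the standard replace-datanode-on-failure client settings; a minimal sketch follows (the class name is illustrative, the config keys are the stock HDFS client keys, and the chosen values are one reasonable option for a small cluster, not taken from this test).

```java
import org.apache.hadoop.conf.Configuration;

// Sketch: relax pipeline-replacement so a client writing with replication=2 can
// keep going on the surviving datanode instead of aborting with
// "All datanodes [...] are bad" once replacements cannot be found.
public final class PipelinePolicySketch {
    public static Configuration lenientClientConf() {
        Configuration conf = new Configuration();
        // Ask for replacement datanodes when a pipeline member fails...
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // ...but continue best-effort with the remaining nodes when the cluster
        // has no spare datanodes left, as in this mini-cluster scenario.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
    }
}
```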
2024-12-16T18:54:08,611 WARN [Thread-749 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741856_1039
2024-12-16T18:54:08,613 WARN [Thread-749 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]
2024-12-16T18:54:08,615 WARN [Thread-749 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34275
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,615 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45632 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6]'}, localName='127.0.0.1:44075', datanodeUuid='f611238f-24f5-4831-a793-a1f9f560c0a4', xmitsInProgress=0}:Exception transferring block BP-919602402-172.17.0.2-1734375215671:blk_1073741857_1040 to mirror 127.0.0.1:34275
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,616 WARN [Thread-749 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK], DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]) is bad.
2024-12-16T18:54:08,616 WARN [Thread-749 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741857_1040
2024-12-16T18:54:08,616 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45632 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-16T18:54:08,616 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_79391528_22 at /127.0.0.1:45632 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:44075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45632 dst: /127.0.0.1:44075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,616 WARN [Thread-749 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]
2024-12-16T18:54:08,618 WARN [Thread-749 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,618 WARN [Thread-749 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK], DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad.
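[Editor's note] The BlockPlacementPolicyDefault(501) warnings in this run explicitly suggest raising two loggers to DEBUG so the NameNode explains why it cannot place the second replica. A minimal sketch of doing that at runtime, assuming the process logs through Log4j 2 (log4j-core on the classpath) as this test harness does; the class name is illustrative.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Sketch: enable the DEBUG loggers the NameNode warning asks for, so the next
// "Failed to place enough replicas" message is accompanied by the placement
// policy's reasoning (excluded nodes, topology decisions).
public final class EnablePlacementDebugSketch {
    public static void main(String[] args) {
        Configurator.setLevel(
            "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}
```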
2024-12-16T18:54:08,618 WARN [Thread-749 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741858_1041
2024-12-16T18:54:08,618 WARN [Thread-749 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]
2024-12-16T18:54:08,619 WARN [IPC Server handler 4 on default port 35231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-16T18:54:08,619 WARN [IPC Server handler 4 on default port 35231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-16T18:54:08,619 WARN [IPC Server handler 4 on default port 35231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-16T18:54:08,624 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248379 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248602
2024-12-16T18:54:08,624 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33917:33917)]
2024-12-16T18:54:08,624 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time
2024-12-16T18:54:08,624 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375244363 is not closed yet, will try archiving it next time
2024-12-16T18:54:08,624 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248379 is not closed yet, will try archiving it next time
2024-12-16T18:54:08,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741849_1032 (size=1261)
2024-12-16T18:54:08,803 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time
2024-12-16T18:54:08,803 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248379 is not closed yet, will try archiving it next time
2024-12-16T18:54:08,803 WARN [sync.4 {}] wal.FSHLog(760): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas.
2024-12-16T18:54:08,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/.tmp/info/bd53846a2cc0410fbc015608f66b8da9
2024-12-16T18:54:08,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/.tmp/info/bd53846a2cc0410fbc015608f66b8da9 as hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/info/bd53846a2cc0410fbc015608f66b8da9
2024-12-16T18:54:08,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/info/bd53846a2cc0410fbc015608f66b8da9, entries=5, sequenceid=12, filesize=10.1 K
2024-12-16T18:54:08,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for f41842edafcf2308940ff2b22b3c218f in 452ms, sequenceid=12, compaction requested=false
2024-12-16T18:54:08,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f41842edafcf2308940ff2b22b3c218f:
2024-12-16T18:54:08,922 WARN [master/7a6e4bf70377:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=96, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
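[Editor's note] The FSHLog(721/750) "low replication" roll requests and the FSHLog(760) "Too many consecutive RollWriter requests" back-off above are threshold-driven. A minimal sketch of the two knobs involved, assuming the FSHLog configuration key names below (they should be verified against the HBase version in use; the values shown are illustrative, not from this test):

```java
import org.apache.hadoop.conf.Configuration;

// Sketch: the WAL asks for a roll when its pipeline has fewer replicas than the
// tolerable count, and stops asking after a bounded number of consecutive roll
// requests, since rolling cannot help while too few datanodes are alive.
public final class WalRollTuningSketch {
    public static Configuration walRollConf() {
        Configuration conf = new Configuration();
        // Minimum pipeline replica count the WAL tolerates before requesting a roll.
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // Consecutive roll requests allowed before the "Too many consecutive
        // RollWriter requests" back-off seen at FSHLog(760).
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        return conf;
    }
}
```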
2024-12-16T18:54:08,922 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C36083%2C1734375217984:(num 1734375218574) roll requested
2024-12-16T18:54:08,923 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C36083%2C1734375217984.1734375248922
2024-12-16T18:54:08,923 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,923 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
    at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:45656 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6]'}, localName='127.0.0.1:44075', datanodeUuid='f611238f-24f5-4831-a793-a1f9f560c0a4', xmitsInProgress=0}:Exception transferring block BP-919602402-172.17.0.2-1734375215671:blk_1073741860_1043 to mirror 127.0.0.1:44267
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,929 WARN [Thread-755 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44267
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,929 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:45656 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-16T18:54:08,929 WARN [Thread-755 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK], DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]) is bad.
2024-12-16T18:54:08,929 WARN [Thread-755 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741860_1043
2024-12-16T18:54:08,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:45656 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:44075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45656 dst: /127.0.0.1:44075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,930 WARN [Thread-755 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]
2024-12-16T18:54:08,932 WARN [Thread-755 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,933 WARN [Thread-755 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK], DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]) is bad.
2024-12-16T18:54:08,933 WARN [Thread-755 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741861_1044
2024-12-16T18:54:08,934 WARN [Thread-755 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46877,DS-8ca290d4-c640-4beb-aff9-862f88137c9c,DISK]
2024-12-16T18:54:08,935 WARN [Thread-755 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,936 WARN [Thread-755 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK], DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]) is bad.
2024-12-16T18:54:08,936 WARN [Thread-755 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741862_1045
2024-12-16T18:54:08,937 WARN [Thread-755 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]
2024-12-16T18:54:08,942 WARN [Thread-755 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43651
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,942 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:45658 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6]'}, localName='127.0.0.1:44075', datanodeUuid='f611238f-24f5-4831-a793-a1f9f560c0a4', xmitsInProgress=0}:Exception transferring block BP-919602402-172.17.0.2-1734375215671:blk_1073741863_1046 to mirror 127.0.0.1:43651
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,942 WARN [Thread-755 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK], DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]) is bad.
2024-12-16T18:54:08,942 WARN [Thread-755 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741863_1046
2024-12-16T18:54:08,942 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:45658 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-16T18:54:08,943 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-878176964_22 at /127.0.0.1:45658 [Receiving block BP-919602402-172.17.0.2-1734375215671:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:44075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45658 dst: /127.0.0.1:44075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:08,943 WARN [Thread-755 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]
2024-12-16T18:54:08,944 WARN [IPC Server handler 2 on default port 35231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-16T18:54:08,944 WARN [IPC Server handler 2 on default port 35231 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-16T18:54:08,944 WARN [IPC Server handler 2 on default port 35231 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-16T18:54:08,946 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
2024-12-16T18:54:08,946 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 with entries=93, filesize=46.05 KB; new WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375248922
2024-12-16T18:54:08,947 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33917:33917)]
2024-12-16T18:54:08,947 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 is not closed yet, will try archiving it next time
2024-12-16T18:54:08,947 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,947 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:08,947 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:54:08,948 WARN [IPC Server handler 0 on default port 35231 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 has not been closed. Lease recovery is in progress. RecoveryId = 1048 for block blk_1073741830_1016
2024-12-16T18:54:08,948 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 after 1ms
2024-12-16T18:54:08,972 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:54:08,973 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51964, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:54:09,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:54:09,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:54:09,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:54:09,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:54:09,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T18:54:09,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f6c90b1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:54:09,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32d73f5b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:54:09,026 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time
2024-12-16T18:54:09,027 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375232233 to hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/oldWALs/7a6e4bf70377%2C45905%2C1734375218213.1734375232233
2024-12-16T18:54:09,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71ab587f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/java.io.tmpdir/jetty-localhost-43735-hadoop-hdfs-3_4_1-tests_jar-_-any-17475634586684684076/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:54:09,113 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39c485d7{HTTP/1.1, (http/1.1)}{localhost:43735}
2024-12-16T18:54:09,114 INFO [Time-limited test {}] server.Server(415): Started @153480ms
2024-12-16T18:54:09,115 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:54:09,493 WARN [Thread-775 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:54:09,496 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95590c63402e19e1 with lease ID 0x6104860f8492c423: from storage DS-8ca290d4-c640-4beb-aff9-862f88137c9c node DatanodeRegistration(127.0.0.1:32859, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=36155, infoSecurePort=0, ipcPort=34741, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-16T18:54:09,497 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95590c63402e19e1 with lease ID 0x6104860f8492c423: from storage DS-340dfb35-c2df-4844-b569-7fb2eae00d76 node DatanodeRegistration(127.0.0.1:32859, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=36155, infoSecurePort=0, ipcPort=34741, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:54:09,889 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@327ec150[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44075, datanodeUuid=f611238f-24f5-4831-a793-a1f9f560c0a4, infoPort=33917, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741844_1027 to 127.0.0.1:43651 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:09,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@247a0c07[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44075, datanodeUuid=f611238f-24f5-4831-a793-a1f9f560c0a4, infoPort=33917, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741854_1037 to 127.0.0.1:43651 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:10,563 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:54:10,568 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51978, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:54:10,889 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@247a0c07[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44075, datanodeUuid=f611238f-24f5-4831-a793-a1f9f560c0a4, infoPort=33917, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741849_1032 to 127.0.0.1:43651 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:12,951 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 after 4003ms
2024-12-16T18:54:18,497 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@26e6b4dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32859, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=36155, infoSecurePort=0, ipcPort=34741, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741836_1012 to 127.0.0.1:34275 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741838_1014 (size=76)
2024-12-16T18:54:19,497 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@26e6b4dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32859, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=36155, infoSecurePort=0, ipcPort=34741, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741828_1004 to 127.0.0.1:43651 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:19,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:54:21,327 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-16T18:54:21,327 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-16T18:54:21,497 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@26e6b4dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32859, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=36155, infoSecurePort=0, ipcPort=34741, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741827_1003 to 127.0.0.1:34275 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:21,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:54:22,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:54:24,499 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@28214fa9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32859, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=36155, infoSecurePort=0, ipcPort=34741, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741826_1002 to 127.0.0.1:34275 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:24,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741837_1013 (size=393)
2024-12-16T18:54:25,500 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@28214fa9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32859, datanodeUuid=dde711e2-a90a-4a26-a874-2990693d37f9, infoPort=36155, infoSecurePort=0, ipcPort=34741, storageInfo=lv=-57;cid=testClusterID;nsid=1946467100;c=1734375215671):Failed to transfer BP-919602402-172.17.0.2-1734375215671:blk_1073741831_1007 to 127.0.0.1:34275 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:54:26,015 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region f41842edafcf2308940ff2b22b3c218f, had cached 0 bytes from a total of 10347
2024-12-16T18:54:28,159 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C45905%2C1734375218213.1734375268159
2024-12-16T18:54:28,167 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248602 with entries=2, filesize=1.57 KB; new WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375268159
2024-12-16T18:54:28,167 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36155:36155),(127.0.0.1/127.0.0.1:33917:33917)]
2024-12-16T18:54:28,167 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time
2024-12-16T18:54:28,167 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248602 is not closed yet, will try archiving it next time
2024-12-16T18:54:28,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741859_1042 (size=1618)
2024-12-16T18:54:28,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45905 {}] regionserver.HRegion(8581): Flush requested on f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:54:28,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f41842edafcf2308940ff2b22b3c218f 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB
2024-12-16T18:54:28,171 INFO [sync.3 {}] wal.FSHLog(777): LowReplication-Roller was enabled.
2024-12-16T18:54:28,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/.tmp/info/7c97547d2f1e475fa541e9bbaf37157b is 1080, key is row0007/info:/1734375248384/Put/seqid=0
2024-12-16T18:54:28,178 WARN [Thread-810 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1050
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,178 WARN [Thread-810 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741866_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK], DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]) is bad.
2024-12-16T18:54:28,178 WARN [Thread-810 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741866_1050
2024-12-16T18:54:28,179 WARN [Thread-810 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]
2024-12-16T18:54:28,180 WARN [Thread-810 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1051
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,180 WARN [Thread-810 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741867_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK], DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]) is bad.
2024-12-16T18:54:28,180 WARN [Thread-810 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741867_1051
2024-12-16T18:54:28,181 WARN [Thread-810 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]
2024-12-16T18:54:28,187 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-16T18:54:28,187 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-16T18:54:28,187 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bafdd00 to 127.0.0.1:53238
2024-12-16T18:54:28,188 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:54:28,188 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-16T18:54:28,188 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=691500191, stopped=false
2024-12-16T18:54:28,188 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7a6e4bf70377,36083,1734375217984
2024-12-16T18:54:28,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741868_1052 (size=13583)
2024-12-16T18:54:28,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741868_1052 (size=13583)
2024-12-16T18:54:28,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=25 (bloomFilter=true), to=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/.tmp/info/7c97547d2f1e475fa541e9bbaf37157b
2024-12-16T18:54:28,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:54:28,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:54:28,201 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-16T18:54:28,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33197-0x100e45398430003, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:54:28,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:28,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:28,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33197-0x100e45398430003, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:28,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:54:28,201 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,45905,1734375218213' *****
2024-12-16T18:54:28,201 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-16T18:54:28,201 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,33197,1734375220472' *****
2024-12-16T18:54:28,201 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-16T18:54:28,201 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-16T18:54:28,201 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33197-0x100e45398430003, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:54:28,202 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-16T18:54:28,202 INFO [RS:1;7a6e4bf70377:33197 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-16T18:54:28,202 INFO [RS:1;7a6e4bf70377:33197 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-16T18:54:28,202 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,33197,1734375220472
2024-12-16T18:54:28,202 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:54:28,202 DEBUG [RS:1;7a6e4bf70377:33197 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:54:28,202 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:54:28,202 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,33197,1734375220472; all regions closed.
2024-12-16T18:54:28,202 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-16T18:54:28,203 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,33197,1734375220472
2024-12-16T18:54:28,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/.tmp/info/7c97547d2f1e475fa541e9bbaf37157b as hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/info/7c97547d2f1e475fa541e9bbaf37157b
2024-12-16T18:54:28,203 WARN [WAL-Shutdown-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,203 ERROR [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1664): Shutdown / close of WAL failed: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
2024-12-16T18:54:28,203 DEBUG [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1665): Shutdown / close exception details:
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,203 DEBUG [RS:1;7a6e4bf70377:33197 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:54:28,203 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:54:28,203 INFO [RS:1;7a6e4bf70377:33197 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-16T18:54:28,204 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:54:28,204 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-16T18:54:28,204 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-16T18:54:28,204 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-16T18:54:28,204 INFO [RS:1;7a6e4bf70377:33197 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33197
2024-12-16T18:54:28,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33197-0x100e45398430003, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,33197,1734375220472
2024-12-16T18:54:28,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:54:28,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/info/7c97547d2f1e475fa541e9bbaf37157b, entries=8, sequenceid=25, filesize=13.3 K
2024-12-16T18:54:28,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~10.50 KB/10757, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for f41842edafcf2308940ff2b22b3c218f in 41ms, sequenceid=25, compaction requested=false
2024-12-16T18:54:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f41842edafcf2308940ff2b22b3c218f:
2024-12-16T18:54:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=23.4 K, sizeToCheck=16.0 K
2024-12-16T18:54:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:54:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f41842edafcf2308940ff2b22b3c218f/info/7c97547d2f1e475fa541e9bbaf37157b because midkey is the same as first or last row
2024-12-16T18:54:28,211 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-16T18:54:28,211 INFO [RS:0;7a6e4bf70377:45905 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-16T18:54:28,211 INFO [RS:0;7a6e4bf70377:45905 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-16T18:54:28,211 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(3579): Received CLOSE for 0d6a1a3a8642b152dd163e8eaa56f05b
2024-12-16T18:54:28,213 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(3579): Received CLOSE for f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:54:28,213 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,45905,1734375218213
2024-12-16T18:54:28,213 DEBUG [RS:0;7a6e4bf70377:45905 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:54:28,213 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-16T18:54:28,213 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-16T18:54:28,213 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-16T18:54:28,213 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 0d6a1a3a8642b152dd163e8eaa56f05b, disabling compactions & flushes
2024-12-16T18:54:28,213 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-16T18:54:28,213 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:54:28,213 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:54:28,213 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b. after waiting 0 ms
2024-12-16T18:54:28,213 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b.
2024-12-16T18:54:28,213 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 0d6a1a3a8642b152dd163e8eaa56f05b 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-16T18:54:28,213 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-16T18:54:28,213 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1603): Online Regions={0d6a1a3a8642b152dd163e8eaa56f05b=hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b., 1588230740=hbase:meta,,1.1588230740, f41842edafcf2308940ff2b22b3c218f=TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f.}
2024-12-16T18:54:28,213 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:54:28,213 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1629): Waiting on 0d6a1a3a8642b152dd163e8eaa56f05b, 1588230740, f41842edafcf2308940ff2b22b3c218f
2024-12-16T18:54:28,213 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:54:28,213 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:54:28,213 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:54:28,214 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:54:28,214 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.87 KB heapSize=5.40 KB
2024-12-16T18:54:28,214 WARN [RS_OPEN_META-regionserver/7a6e4bf70377:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,214 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C45905%2C1734375218213.meta:.meta(num 1734375219669) roll requested
2024-12-16T18:54:28,214 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:54:28,215 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C45905%2C1734375218213.meta.1734375268214.meta
2024-12-16T18:54:28,215 ERROR [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server 7a6e4bf70377,45905,1734375218213: Unrecoverable exception while closing hbase:meta,,1.1588230740 *****
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,215 ERROR [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint]
2024-12-16T18:54:28,218 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,33197,1734375220472]
2024-12-16T18:54:28,218 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,33197,1734375220472; numProcessing=1
2024-12-16T18:54:28,218 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory
2024-12-16T18:54:28,220 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC
2024-12-16T18:54:28,220 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication
2024-12-16T18:54:28,220 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server
2024-12-16T18:54:28,220 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort:
{
  "beans": [
    {
      "name": "java.lang:type=Memory",
      "modelerType": "sun.management.MemoryImpl",
      "ObjectPendingFinalizationCount": 0,
      "HeapMemoryUsage": {
        "committed": 1048576000,
        "init": 1048576000,
        "max": 2306867200,
        "used": 264796544
      },
      "NonHeapMemoryUsage": {
        "committed": 161546240,
        "init": 7667712,
        "max": -1,
        "used": 159533824
      },
      "Verbose": false,
      "ObjectName": "java.lang:type=Memory"
    }
  ],
  "beans": [],
  "beans": [],
  "beans": []
}
2024-12-16T18:54:28,224 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083 {}] master.MasterRpcServices(626): 7a6e4bf70377,45905,1734375218213 reported a fatal error:
***** ABORTING region server 7a6e4bf70377,45905,1734375218213: Unrecoverable exception while closing hbase:meta,,1.1588230740 *****
Cause:
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191)
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064)
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967)
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168)
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721)
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644)
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627)
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408)
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707)
2024-12-16T18:54:28,226 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,33197,1734375220472 already deleted, retry=false
2024-12-16T18:54:28,226 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,33197,1734375220472 expired; onlineServers=1
2024-12-16T18:54:28,231 WARN [regionserver/7a6e4bf70377:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
2024-12-16T18:54:28,231 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta with entries=11, filesize=3.63 KB; new WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375268214.meta
2024-12-16T18:54:28,232 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36155:36155),(127.0.0.1/127.0.0.1:33917:33917)]
2024-12-16T18:54:28,232 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta is not closed yet, will try archiving it next time
2024-12-16T18:54:28,232 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,232 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44267,DS-0f66e070-8957-4826-9a9d-a92f0301cce9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,232 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:54:28,233 WARN [IPC Server handler 2 on default port 35231 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta has not been closed. Lease recovery is in progress. RecoveryId = 1054 for block blk_1073741834_1018
2024-12-16T18:54:28,233 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta after 1ms
2024-12-16T18:54:28,236 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b/.tmp/info/7b69b20590fd4500bae3a67745d99b00 is 45, key is default/info:d/1734375220315/Put/seqid=0
2024-12-16T18:54:28,238 WARN [Thread-820 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1055
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:28,238 WARN [Thread-820 {}] hdfs.DataStreamer(1731): Error Recovery for BP-919602402-172.17.0.2-1734375215671:blk_1073741870_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK], DatanodeInfoWithStorage[127.0.0.1:43651,DS-d95bbcce-411c-4c82-a8c1-6daedaa528db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK]) is bad.
2024-12-16T18:54:28,239 WARN [Thread-820 {}] hdfs.DataStreamer(1850): Abandoning BP-919602402-172.17.0.2-1734375215671:blk_1073741870_1055 2024-12-16T18:54:28,239 WARN [Thread-820 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34275,DS-ecd28431-5389-4f45-9662-429b684634d8,DISK] 2024-12-16T18:54:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741871_1056 (size=5037) 2024-12-16T18:54:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741871_1056 (size=5037) 2024-12-16T18:54:28,248 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b/.tmp/info/7b69b20590fd4500bae3a67745d99b00 2024-12-16T18:54:28,255 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b/.tmp/info/7b69b20590fd4500bae3a67745d99b00 as hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b/info/7b69b20590fd4500bae3a67745d99b00 2024-12-16T18:54:28,262 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b/info/7b69b20590fd4500bae3a67745d99b00, entries=2, sequenceid=6, filesize=4.9 K 2024-12-16T18:54:28,263 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 0d6a1a3a8642b152dd163e8eaa56f05b in 50ms, sequenceid=6, compaction requested=false 2024-12-16T18:54:28,268 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/data/hbase/namespace/0d6a1a3a8642b152dd163e8eaa56f05b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T18:54:28,269 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b. 2024-12-16T18:54:28,269 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 0d6a1a3a8642b152dd163e8eaa56f05b: 2024-12-16T18:54:28,269 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734375219779.0d6a1a3a8642b152dd163e8eaa56f05b. 
2024-12-16T18:54:28,269 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing f41842edafcf2308940ff2b22b3c218f, disabling compactions & flushes 2024-12-16T18:54:28,269 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:54:28,269 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:54:28,269 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. after waiting 0 ms 2024-12-16T18:54:28,269 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:54:28,270 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for f41842edafcf2308940ff2b22b3c218f: 2024-12-16T18:54:28,270 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:54:28,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33197-0x100e45398430003, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:54:28,318 INFO [RS:1;7a6e4bf70377:33197 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,33197,1734375220472; zookeeper connection closed. 
2024-12-16T18:54:28,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33197-0x100e45398430003, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:54:28,318 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ea671f8 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ea671f8 2024-12-16T18:54:28,414 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-16T18:54:28,414 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(3579): Received CLOSE for f41842edafcf2308940ff2b22b3c218f 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-16T18:54:28,414 DEBUG [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, f41842edafcf2308940ff2b22b3c218f 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing f41842edafcf2308940ff2b22b3c218f, disabling compactions & flushes 2024-12-16T18:54:28,414 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:54:28,414 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. after waiting 0 ms 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 
2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for f41842edafcf2308940ff2b22b3c218f: 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-16T18:54:28,414 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnDatanodeDeath,,1734375220634.f41842edafcf2308940ff2b22b3c218f. 2024-12-16T18:54:28,570 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 is not closed yet, will try archiving it next time 2024-12-16T18:54:28,570 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375244363 to hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/oldWALs/7a6e4bf70377%2C45905%2C1734375218213.1734375244363 2024-12-16T18:54:28,572 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248379 to hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/oldWALs/7a6e4bf70377%2C45905%2C1734375218213.1734375248379 2024-12-16T18:54:28,573 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375248602 to hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/oldWALs/7a6e4bf70377%2C45905%2C1734375218213.1734375248602 2024-12-16T18:54:28,593 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T18:54:28,614 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-16T18:54:28,614 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,45905,1734375218213; all regions closed. 
2024-12-16T18:54:28,615 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213 2024-12-16T18:54:28,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741869_1053 (size=93) 2024-12-16T18:54:28,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741869_1053 (size=93) 2024-12-16T18:54:29,084 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T18:54:29,103 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-16T18:54:29,104 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-16T18:54:30,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741859_1042 (size=1618) 2024-12-16T18:54:32,235 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta after 4003ms 2024-12-16T18:54:32,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-16T18:54:33,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,618 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-16T18:54:33,619 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213 2024-12-16T18:54:33,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741865_1049 (size=13280) 2024-12-16T18:54:33,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741865_1049 (size=13280) 2024-12-16T18:54:33,792 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-16T18:54:33,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:33,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:37,538 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T18:54:38,651 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-16T18:54:38,651 DEBUG [RS:0;7a6e4bf70377:45905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T18:54:38,652 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T18:54:38,652 INFO [RS:0;7a6e4bf70377:45905 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-16T18:54:38,653 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
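The repeated wal.FSHLog(508) ERROR above names the knob it wants raised: hbase.wal.fshlog.wait.on.shutdown.seconds, whose effective value here is the 5 seconds quoted in the message. A minimal sketch of raising it through the normal HBase configuration API follows; the value 30 is an arbitrary illustrative assumption, not a recommendation from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWait {
        public static Configuration withLongerWalShutdownWait() {
            Configuration conf = HBaseConfiguration.create();
            // The log above waited 5 seconds (the effective default) for the
            // WAL writer close to complete before giving up; 30 is an
            // illustrative value only.
            conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
            return conf;
        }
    }

Note that in this run the close can never complete, because the surviving datanode pipeline is gone, so a longer wait would only delay the shutdown; the setting matters more when the filesystem is merely slow rather than broken.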
2024-12-16T18:54:38,653 INFO [RS:0;7a6e4bf70377:45905 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45905 2024-12-16T18:54:38,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,45905,1734375218213 2024-12-16T18:54:38,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T18:54:38,734 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,45905,1734375218213] 2024-12-16T18:54:38,734 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,45905,1734375218213; numProcessing=2 2024-12-16T18:54:38,742 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,45905,1734375218213 already deleted, retry=false 2024-12-16T18:54:38,742 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,45905,1734375218213 expired; onlineServers=0 2024-12-16T18:54:38,742 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,36083,1734375217984' ***** 2024-12-16T18:54:38,742 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-16T18:54:38,742 DEBUG [M:0;7a6e4bf70377:36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2117ac4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0 2024-12-16T18:54:38,743 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,36083,1734375217984 2024-12-16T18:54:38,743 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,36083,1734375217984; all regions closed. 2024-12-16T18:54:38,743 DEBUG [M:0;7a6e4bf70377:36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T18:54:38,743 DEBUG [M:0;7a6e4bf70377:36083 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-16T18:54:38,743 DEBUG [M:0;7a6e4bf70377:36083 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-16T18:54:38,743 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-16T18:54:38,743 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375218939 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375218939,5,FailOnTimeoutGroup] 2024-12-16T18:54:38,743 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375218947 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375218947,5,FailOnTimeoutGroup] 2024-12-16T18:54:38,743 INFO [M:0;7a6e4bf70377:36083 {}] hbase.ChoreService(370): Chore service for: master/7a6e4bf70377:0 had [] on shutdown 2024-12-16T18:54:38,744 DEBUG [M:0;7a6e4bf70377:36083 {}] master.HMaster(1733): Stopping service threads 2024-12-16T18:54:38,744 INFO [M:0;7a6e4bf70377:36083 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-16T18:54:38,744 INFO [M:0;7a6e4bf70377:36083 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-16T18:54:38,744 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-16T18:54:38,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-16T18:54:38,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:54:38,751 DEBUG [M:0;7a6e4bf70377:36083 {}] zookeeper.ZKUtil(347): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-16T18:54:38,751 WARN [M:0;7a6e4bf70377:36083 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-16T18:54:38,751 INFO [M:0;7a6e4bf70377:36083 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-16T18:54:38,751 INFO [M:0;7a6e4bf70377:36083 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-16T18:54:38,751 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T18:54:38,751 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:54:38,751 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T18:54:38,751 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:54:38,752 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T18:54:38,752 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-16T18:54:38,752 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.09 KB heapSize=49.30 KB 2024-12-16T18:54:38,753 WARN [sync.3 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]] 2024-12-16T18:54:38,753 WARN [sync.3 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44075,DS-ed845963-89e6-4f95-8962-4ed4be9648ab,DISK]] 2024-12-16T18:54:38,754 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C36083%2C1734375217984:(num 1734375248922) roll requested 2024-12-16T18:54:38,754 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C36083%2C1734375217984.1734375278754 2024-12-16T18:54:38,764 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375248922 with entries=1, filesize=349 B; new WAL /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375278754 2024-12-16T18:54:38,764 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33917:33917),(127.0.0.1/127.0.0.1:36155:36155)] 2024-12-16T18:54:38,764 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 is not closed yet, will try archiving it next time 2024-12-16T18:54:38,765 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375248922 is not closed yet, will try archiving it next time 2024-12-16T18:54:38,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741864_1047 (size=357) 2024-12-16T18:54:38,772 DEBUG [M:0;7a6e4bf70377:36083 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/577f1101e99044e4998446c0fef49965 is 82, key is hbase:meta,,1/info:regioninfo/1734375219715/Put/seqid=0 2024-12-16T18:54:38,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741873_1058 (size=5672) 2024-12-16T18:54:38,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741873_1058 (size=5672) 2024-12-16T18:54:38,777 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/577f1101e99044e4998446c0fef49965 
2024-12-16T18:54:38,798 DEBUG [M:0;7a6e4bf70377:36083 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b510028d00dc42b18f079fea66d34aba is 775, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734375221058/Put/seqid=0 2024-12-16T18:54:38,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741874_1059 (size=7466) 2024-12-16T18:54:38,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741874_1059 (size=7466) 2024-12-16T18:54:38,804 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.42 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b510028d00dc42b18f079fea66d34aba 2024-12-16T18:54:38,824 DEBUG [M:0;7a6e4bf70377:36083 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d46569d05cd4830ad854d15ed85f4f2 is 69, key is 7a6e4bf70377,33197,1734375220472/rs:state/1734375220569/Put/seqid=0 2024-12-16T18:54:38,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741875_1060 (size=5224) 2024-12-16T18:54:38,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741875_1060 (size=5224) 2024-12-16T18:54:38,830 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d46569d05cd4830ad854d15ed85f4f2 2024-12-16T18:54:38,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:54:38,834 INFO [RS:0;7a6e4bf70377:45905 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,45905,1734375218213; zookeeper connection closed. 
2024-12-16T18:54:38,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45905-0x100e45398430001, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:54:38,834 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@47cefdc0 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@47cefdc0 2024-12-16T18:54:38,834 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-16T18:54:38,850 DEBUG [M:0;7a6e4bf70377:36083 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/893db96b9e214a9996d6db05a053e58a is 52, key is load_balancer_on/state:d/1734375220455/Put/seqid=0 2024-12-16T18:54:38,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741876_1061 (size=5056) 2024-12-16T18:54:38,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741876_1061 (size=5056) 2024-12-16T18:54:38,856 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/893db96b9e214a9996d6db05a053e58a 2024-12-16T18:54:38,862 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/577f1101e99044e4998446c0fef49965 as hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/577f1101e99044e4998446c0fef49965 2024-12-16T18:54:38,867 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/577f1101e99044e4998446c0fef49965, entries=8, sequenceid=97, filesize=5.5 K 2024-12-16T18:54:38,869 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b510028d00dc42b18f079fea66d34aba as hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b510028d00dc42b18f079fea66d34aba 2024-12-16T18:54:38,875 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b510028d00dc42b18f079fea66d34aba, entries=11, sequenceid=97, filesize=7.3 K 2024-12-16T18:54:38,876 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3d46569d05cd4830ad854d15ed85f4f2 as hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3d46569d05cd4830ad854d15ed85f4f2 2024-12-16T18:54:38,881 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3d46569d05cd4830ad854d15ed85f4f2, entries=2, sequenceid=97, filesize=5.1 K 2024-12-16T18:54:38,883 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/893db96b9e214a9996d6db05a053e58a as hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/893db96b9e214a9996d6db05a053e58a 2024-12-16T18:54:38,888 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/893db96b9e214a9996d6db05a053e58a, entries=1, sequenceid=97, filesize=4.9 K 2024-12-16T18:54:38,889 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.09 KB/41051, heapSize ~49.23 KB/50416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=97, compaction requested=false 2024-12-16T18:54:38,891 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-16T18:54:38,891 DEBUG [M:0;7a6e4bf70377:36083 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T18:54:38,891 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984 2024-12-16T18:54:38,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741872_1057 (size=493) 2024-12-16T18:54:38,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44075 is added to blk_1073741872_1057 (size=493) 2024-12-16T18:54:39,168 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 is not closed yet, will try archiving it next time 2024-12-16T18:54:39,169 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375248922 to hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/oldWALs/7a6e4bf70377%2C36083%2C1734375217984.1734375248922 2024-12-16T18:54:39,179 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/oldWALs/7a6e4bf70377%2C36083%2C1734375217984.1734375248922 to hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/oldWALs/7a6e4bf70377%2C36083%2C1734375217984.1734375248922$masterlocalwal$ 2024-12-16T18:54:39,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:39,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:39,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32859 is added to blk_1073741864_1047 (size=357) 2024-12-16T18:54:40,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:40,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:41,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:41,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:42,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:54:42,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:42,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-16T18:54:43,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:43,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-16T18:54:43,894 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-16T18:54:43,894 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:54:43,894 INFO [M:0;7a6e4bf70377:36083 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-16T18:54:43,894 INFO [M:0;7a6e4bf70377:36083 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36083
2024-12-16T18:54:43,907 DEBUG [M:0;7a6e4bf70377:36083 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7a6e4bf70377,36083,1734375217984 already deleted, retry=false
2024-12-16T18:54:44,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:54:44,017 INFO [M:0;7a6e4bf70377:36083 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,36083,1734375217984; zookeeper connection closed.
2024-12-16T18:54:44,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x100e45398430000, quorum=127.0.0.1:53238, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:54:44,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71ab587f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:54:44,020 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39c485d7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:54:44,020 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:54:44,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32d73f5b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:54:44,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f6c90b1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,STOPPED}
2024-12-16T18:54:44,023 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:54:44,023 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-919602402-172.17.0.2-1734375215671 (Datanode Uuid dde711e2-a90a-4a26-a874-2990693d37f9) service to localhost/127.0.0.1:35231
2024-12-16T18:54:44,023 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data3/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
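The ERROR above is FSHLog giving up after the five-second wait for its writer close to complete. As the message itself suggests, the wait can be lengthened through the quoted configuration key. A minimal sketch of doing so programmatically, assuming only the stock HBaseConfiguration/Configuration API; the class name and the 30-second value are illustrative assumptions, not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitExample {
    public static void main(String[] args) {
        // Load the usual HBase configuration (hbase-site.xml and friends on the classpath).
        Configuration conf = HBaseConfiguration.create();
        // Key quoted verbatim from the FSHLog ERROR above; this run waited 5 seconds.
        // 30 is an arbitrary illustrative value, not a recommendation.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
        System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
    }
}

The same key can equally be set in hbase-site.xml. Note that in this run the writers could never close because the underlying DFSClient was already shut down ("Filesystem closed" below), so a longer wait would likely only have delayed the same ERROR.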
2024-12-16T18:54:44,023 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data4/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:54:44,024 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:54:44,024 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:54:44,024 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:54:44,027 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35781a4d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:54:44,027 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59a37304{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:54:44,027 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:54:44,028 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23bd7700{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:54:44,028 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b25f26c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,STOPPED}
2024-12-16T18:54:44,030 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:54:44,030 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
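The recurring "Failed invocation ... InvocationTargetException ... Filesystem closed" WARNs bracketing this shutdown come from RecoverLeaseFSUtils probing, via reflection, whether the old WAL file is already closed while the DFSClient behind the filesystem has itself been shut down, as the Caused-by frames show. A minimal sketch of that probe under stated assumptions (direct call instead of reflection; `walPath` and the surrounding scaffolding are hypothetical placeholders):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalCloseProbe {
    // Returns true if HDFS reports the file closed; false if we cannot tell.
    static boolean isClosed(FileSystem fs, Path walPath) {
        if (!(fs instanceof DistributedFileSystem)) {
            return false; // non-HDFS filesystems have no lease to recover
        }
        try {
            // The same call the stack traces above reach through reflection.
            return ((DistributedFileSystem) fs).isFileClosed(walPath);
        } catch (IOException e) {
            // "Filesystem closed" lands here once the DFSClient is shut down,
            // which is exactly what the repeated WARNs in this log record.
            return false;
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration(); // hypothetical configuration
        Path walPath = new Path(args[0]);         // hypothetical WAL path
        System.out.println(isClosed(FileSystem.get(conf), walPath));
    }
}

Because the probe keeps failing the same way, the Close-WAL-Writer retry loop never succeeds, which is what eventually trips the five-second FSHLog shutdown wait above.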
2024-12-16T18:54:44,030 WARN [BP-919602402-172.17.0.2-1734375215671 heartbeating to localhost/127.0.0.1:35231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-919602402-172.17.0.2-1734375215671 (Datanode Uuid f611238f-24f5-4831-a793-a1f9f560c0a4) service to localhost/127.0.0.1:35231
2024-12-16T18:54:44,030 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:54:44,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data5/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:54:44,031 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:54:44,034 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/cluster_8b55f3d2-5e77-7330-bd7b-d95883a41361/dfs/data/data6/current/BP-919602402-172.17.0.2-1734375215671 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:54:44,041 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24064364{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T18:54:44,042 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72d4a98{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:54:44,042 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:54:44,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2be1c732{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:54:44,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@309e2ef7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir/,STOPPED}
2024-12-16T18:54:44,055 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-16T18:54:44,088 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-16T18:54:44,098 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=86 (was 65) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35231 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35231 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$795/0x00007f7f9cb82cd0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35231 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:35231 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:35231 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35231 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-10-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$795/0x00007f7f9cb82cd0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$795/0x00007f7f9cb82cd0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:35231 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-10-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:35231 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:35231 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-11-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-10-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=410 (was 406) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=256 (was 480), ProcessCount=11 (was 11), AvailableMemoryMB=4965 (was 4295) - AvailableMemoryMB LEAK? -
2024-12-16T18:54:44,106 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=86, OpenFileDescriptor=410, MaxFileDescriptor=1048576, SystemLoadAverage=256, ProcessCount=11, AvailableMemoryMB=4964
2024-12-16T18:54:44,107 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-16T18:54:44,107 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.log.dir so I do NOT create it in target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174
2024-12-16T18:54:44,107 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/eb46b8be-9ea8-4b7c-f331-f315c8948e9f/hadoop.tmp.dir so I do NOT create it in target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174
2024-12-16T18:54:44,107 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5, deleteOnExit=true
2024-12-16T18:54:44,107 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-16T18:54:44,107 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/test.cache.data in system properties and HBase conf
2024-12-16T18:54:44,108 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.tmp.dir in system properties and HBase conf
2024-12-16T18:54:44,108 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir in system properties and HBase conf
2024-12-16T18:54:44,108 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-16T18:54:44,108 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-16T18:54:44,108 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-16T18:54:44,108 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-16T18:54:44,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:54:44,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:54:44,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-16T18:54:44,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:54:44,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-16T18:54:44,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-16T18:54:44,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:54:44,109 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:54:44,110 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-16T18:54:44,110 INFO
[Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/nfs.dump.dir in system properties and HBase conf 2024-12-16T18:54:44,110 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/java.io.tmpdir in system properties and HBase conf 2024-12-16T18:54:44,110 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T18:54:44,110 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-16T18:54:44,110 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-16T18:54:44,129 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-16T18:54:44,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:44,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:44,410 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:54:44,417 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T18:54:44,419 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T18:54:44,419 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T18:54:44,419 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T18:54:44,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:54:44,424 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@796327b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,AVAILABLE} 2024-12-16T18:54:44,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b87ebe3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T18:54:44,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@377bc93b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/java.io.tmpdir/jetty-localhost-45915-hadoop-hdfs-3_4_1-tests_jar-_-any-12485477083884787789/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-16T18:54:44,551 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24cd7079{HTTP/1.1, (http/1.1)}{localhost:45915} 2024-12-16T18:54:44,551 INFO [Time-limited test {}] server.Server(415): Started @188918ms 2024-12-16T18:54:44,570 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-16T18:54:44,837 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:54:44,840 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T18:54:44,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T18:54:44,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T18:54:44,841 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T18:54:44,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fd029e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,AVAILABLE} 2024-12-16T18:54:44,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b0c3c75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T18:54:44,952 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62b56978{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/java.io.tmpdir/jetty-localhost-33371-hadoop-hdfs-3_4_1-tests_jar-_-any-13831840157023048465/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:54:44,953 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@778bcc60{HTTP/1.1, (http/1.1)}{localhost:33371} 2024-12-16T18:54:44,953 INFO [Time-limited test {}] server.Server(415): Started @189319ms 2024-12-16T18:54:44,954 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-16T18:54:44,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:44,989 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:54:45,000 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T18:54:45,006 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T18:54:45,006 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T18:54:45,006 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T18:54:45,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74c5d2c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,AVAILABLE} 2024-12-16T18:54:45,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56e5c03c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T18:54:45,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35a9519b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/java.io.tmpdir/jetty-localhost-40939-hadoop-hdfs-3_4_1-tests_jar-_-any-2826715505823094247/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:54:45,122 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e942200{HTTP/1.1, (http/1.1)}{localhost:40939} 2024-12-16T18:54:45,122 INFO [Time-limited test {}] server.Server(415): Started @189488ms 2024-12-16T18:54:45,123 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
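At this point the new minicluster's NameNode and both DataNode web servers are up, matching the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...} logged when testLogRollOnPipelineRestart began. For orientation, a minimal sketch of the test-utility calls that bracket such a run, assuming the HBase 2.x testing API seen in this log (class and builder names as they appear above; the test body is elided):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Mirrors the option string logged above: one master, one region server,
        // two datanodes, one ZooKeeper server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // produces the DFS/Jetty/ZK startup lines seen here
        try {
            // ... test body would go here ...
        } finally {
            util.shutdownMiniCluster(); // produces the "Minicluster is down" line seen earlier
        }
    }
}

The ResourceChecker "before:"/"after:" pairs in this log wrap exactly this lifecycle, diffing thread and file-descriptor counts across it to flag the "Potentially hanging thread" leaks reported above.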
2024-12-16T18:54:45,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:54:45,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:54:45,797 WARN [Thread-954 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data1/current/BP-23588281-172.17.0.2-1734375284143/current, will proceed with Du for space computation calculation,
2024-12-16T18:54:45,797 WARN [Thread-955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data2/current/BP-23588281-172.17.0.2-1734375284143/current, will proceed with Du for space computation calculation,
2024-12-16T18:54:45,821 WARN [Thread-919 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:54:45,824 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6072768b1ed947c7 with lease ID 0x486fa5d1bc775420: Processing first storage report for DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4 from datanode DatanodeRegistration(127.0.0.1:35693, datanodeUuid=75933a06-b540-4670-be91-29ae3cce9c45, infoPort=34739, infoSecurePort=0, ipcPort=36093, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143)
2024-12-16T18:54:45,824 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6072768b1ed947c7 with lease ID 0x486fa5d1bc775420: from storage DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4 node DatanodeRegistration(127.0.0.1:35693, datanodeUuid=75933a06-b540-4670-be91-29ae3cce9c45, infoPort=34739, infoSecurePort=0, ipcPort=36093, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:54:45,824 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6072768b1ed947c7 with lease ID 0x486fa5d1bc775420: Processing first storage report for DS-ecb495d2-4e3e-4c19-932c-a0069e100e4e from datanode DatanodeRegistration(127.0.0.1:35693, datanodeUuid=75933a06-b540-4670-be91-29ae3cce9c45, infoPort=34739, infoSecurePort=0, ipcPort=36093, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143)
2024-12-16T18:54:45,824 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6072768b1ed947c7 with lease ID 0x486fa5d1bc775420: from storage DS-ecb495d2-4e3e-4c19-932c-a0069e100e4e node DatanodeRegistration(127.0.0.1:35693, datanodeUuid=75933a06-b540-4670-be91-29ae3cce9c45, infoPort=34739, infoSecurePort=0, ipcPort=36093, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:54:45,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:54:46,061 WARN [Thread-966 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data3/current/BP-23588281-172.17.0.2-1734375284143/current, will proceed with Du for space computation calculation,
2024-12-16T18:54:46,061 WARN [Thread-967 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data4/current/BP-23588281-172.17.0.2-1734375284143/current, will proceed with Du for space computation calculation,
2024-12-16T18:54:46,077 WARN [Thread-942 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:54:46,079 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52563d9936736c27 with lease ID 0x486fa5d1bc775421: Processing first storage report for DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155 from datanode DatanodeRegistration(127.0.0.1:40047, datanodeUuid=6fa5ce46-9b4e-48af-8e8f-22651392c095, infoPort=38287, infoSecurePort=0, ipcPort=42973, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143)
2024-12-16T18:54:46,079 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52563d9936736c27 with lease ID 0x486fa5d1bc775421: from storage DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155 node DatanodeRegistration(127.0.0.1:40047, datanodeUuid=6fa5ce46-9b4e-48af-8e8f-22651392c095, infoPort=38287, infoSecurePort=0, ipcPort=42973, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:54:46,079 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52563d9936736c27 with lease ID 0x486fa5d1bc775421: Processing first storage report for DS-b24d1b6d-4adf-420e-9378-ad881bc69c00 from datanode DatanodeRegistration(127.0.0.1:40047, datanodeUuid=6fa5ce46-9b4e-48af-8e8f-22651392c095, infoPort=38287, infoSecurePort=0, ipcPort=42973, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143)
2024-12-16T18:54:46,079 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52563d9936736c27 with lease ID 0x486fa5d1bc775421: from storage DS-b24d1b6d-4adf-420e-9378-ad881bc69c00 node DatanodeRegistration(127.0.0.1:40047, datanodeUuid=6fa5ce46-9b4e-48af-8e8f-22651392c095, infoPort=38287, infoSecurePort=0, ipcPort=42973, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:54:46,157 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174
2024-12-16T18:54:46,160 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/zookeeper_0, clientPort=63223, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-16T18:54:46,161 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63223
2024-12-16T18:54:46,161 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:54:46,163 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:54:46,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:54:46,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:54:46,181 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89 with version=8
2024-12-16T18:54:46,181 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/hbase-staging
2024-12-16T18:54:46,183 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:54:46,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:54:46,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:54:46,184 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:54:46,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:54:46,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:54:46,184 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:54:46,184 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:54:46,185 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36945
2024-12-16T18:54:46,185 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:54:46,187 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:54:46,189 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36945 connecting to ZooKeeper ensemble=127.0.0.1:63223
2024-12-16T18:54:46,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:369450x0, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:54:46,247 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36945-0x100e454a4490000 connected
2024-12-16T18:54:46,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:54:46,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:54:46,313 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:54:46,313 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:54:46,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36945
2024-12-16T18:54:46,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36945
2024-12-16T18:54:46,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36945
2024-12-16T18:54:46,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36945
2024-12-16T18:54:46,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36945
2024-12-16T18:54:46,318 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89, hbase.cluster.distributed=false
2024-12-16T18:54:46,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:54:46,333 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:54:46,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:54:46,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:54:46,333 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:54:46,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:54:46,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:54:46,333 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:54:46,333 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:54:46,334 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42217
2024-12-16T18:54:46,334 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T18:54:46,335 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-16T18:54:46,335 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:54:46,337 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:54:46,340 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42217 connecting to ZooKeeper ensemble=127.0.0.1:63223
2024-12-16T18:54:46,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:422170x0, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:54:46,350 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:422170x0, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:54:46,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42217-0x100e454a4490001 connected
2024-12-16T18:54:46,351 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:54:46,351 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:54:46,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42217
2024-12-16T18:54:46,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42217
2024-12-16T18:54:46,353 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42217
2024-12-16T18:54:46,354 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42217
2024-12-16T18:54:46,354 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42217
2024-12-16T18:54:46,354 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7a6e4bf70377,36945,1734375286183
2024-12-16T18:54:46,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:54:46,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:54:46,362 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a6e4bf70377,36945,1734375286183
2024-12-16T18:54:46,365 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a6e4bf70377:36945
2024-12-16T18:54:46,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:54:46,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:54:46,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,370 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:54:46,371 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a6e4bf70377,36945,1734375286183 from backup master directory
2024-12-16T18:54:46,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:54:46,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a6e4bf70377,36945,1734375286183
2024-12-16T18:54:46,378 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:54:46,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:54:46,378 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-16T18:54:46,378 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a6e4bf70377,36945,1734375286183
2024-12-16T18:54:46,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:54:46,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:54:46,393 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/hbase.id with ID: 9b9bb445-1f2f-40fb-ab18-25789f0c1c0c
2024-12-16T18:54:46,412 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:54:46,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:54:46,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:54:46,435 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-16T18:54:46,436 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-16T18:54:46,437 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:54:46,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:54:46,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:54:46,447 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store
2024-12-16T18:54:46,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:54:46,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:54:46,455 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:54:46,455 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:54:46,455 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:54:46,455 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:54:46,455 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:54:46,455 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:54:46,455 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:54:46,455 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:54:46,456 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/.initializing
2024-12-16T18:54:46,456 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183
2024-12-16T18:54:46,459 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C36945%2C1734375286183, suffix=, logDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183, archiveDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/oldWALs, maxLogs=10
2024-12-16T18:54:46,459 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C36945%2C1734375286183.1734375286459
2024-12-16T18:54:46,465 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375286459
2024-12-16T18:54:46,466 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38287:38287),(127.0.0.1/127.0.0.1:34739:34739)]
2024-12-16T18:54:46,466 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:54:46,466 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:54:46,466 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,466 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-16T18:54:46,470 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:46,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:54:46,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-16T18:54:46,473 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:46,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:54:46,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-16T18:54:46,476 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:46,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:54:46,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-16T18:54:46,480 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:46,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:54:46,482 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,483 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,486 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-16T18:54:46,488 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:54:46,494 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:54:46,495 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767641, jitterRate=-0.023894906044006348}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-16T18:54:46,496 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:54:46,498 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-16T18:54:46,502 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35bd1586, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:54:46,503 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-16T18:54:46,504 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-16T18:54:46,504 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-16T18:54:46,504 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-16T18:54:46,504 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-16T18:54:46,505 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-16T18:54:46,505 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-16T18:54:46,508 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-16T18:54:46,510 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-16T18:54:46,520 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-16T18:54:46,520 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-16T18:54:46,523 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-16T18:54:46,533 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-16T18:54:46,534 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-16T18:54:46,535 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-16T18:54:46,545 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-16T18:54:46,550 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-16T18:54:46,558 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-16T18:54:46,582 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-16T18:54:46,600 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-16T18:54:46,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:54:46,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:54:46,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,611 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7a6e4bf70377,36945,1734375286183, sessionid=0x100e454a4490000, setting cluster-up flag (Was=false)
2024-12-16T18:54:46,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,678 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-16T18:54:46,680 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,36945,1734375286183
2024-12-16T18:54:46,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:46,736 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-16T18:54:46,738 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,36945,1734375286183
2024-12-16T18:54:46,741 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-16T18:54:46,741 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-16T18:54:46,741 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a6e4bf70377,36945,1734375286183
Number of backup masters: 0
Number of live region servers: 0
Number of dead region servers: 0
Number of unknown region servers: 0
Average load: 0.0
Number of requests: 0
Number of regions: 0
Number of regions in transition: 0
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a6e4bf70377:0, corePoolSize=10, maxPoolSize=10
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:54:46,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,747 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734375316747
2024-12-16T18:54:46,747 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:54:46,747 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-16T18:54:46,747 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-16T18:54:46,747 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-16T18:54:46,748 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-16T18:54:46,748 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-16T18:54:46,748 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-16T18:54:46,748 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-16T18:54:46,749 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:46,749 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:54:46,754 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,754 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-16T18:54:46,754 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-16T18:54:46,754 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-16T18:54:46,758 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-16T18:54:46,758 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-16T18:54:46,768 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a6e4bf70377:42217
2024-12-16T18:54:46,769 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1008): ClusterId : 9b9bb445-1f2f-40fb-ab18-25789f0c1c0c
2024-12-16T18:54:46,769 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-16T18:54:46,774 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375286758,5,FailOnTimeoutGroup]
2024-12-16T18:54:46,782 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375286774,5,FailOnTimeoutGroup]
2024-12-16T18:54:46,782 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,782 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-16T18:54:46,782 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,782 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,789 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-16T18:54:46,789 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-16T18:54:46,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:54:46,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:54:46,801 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-16T18:54:46,801 DEBUG [RS:0;7a6e4bf70377:42217 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7067cac9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:54:46,803 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-16T18:54:46,803 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89
2024-12-16T18:54:46,806 DEBUG [RS:0;7a6e4bf70377:42217 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ca241c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:54:46,806 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-16T18:54:46,806 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-16T18:54:46,806 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-16T18:54:46,807 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,36945,1734375286183 with isa=7a6e4bf70377/172.17.0.2:42217, startcode=1734375286332
2024-12-16T18:54:46,807 DEBUG [RS:0;7a6e4bf70377:42217 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:54:46,810 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56969, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:54:46,811 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36945 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:46,811 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36945 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:46,812 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89
2024-12-16T18:54:46,813 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:35803
2024-12-16T18:54:46,813 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-16T18:54:46,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:54:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:54:46,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:54:46,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:54:46,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:54:46,820 DEBUG [RS:0;7a6e4bf70377:42217 {}] zookeeper.ZKUtil(111): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:46,820 WARN [RS:0;7a6e4bf70377:42217 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-16T18:54:46,820 INFO [RS:0;7a6e4bf70377:42217 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:54:46,820 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:46,820 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,42217,1734375286332]
2024-12-16T18:54:46,821 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:54:46,821 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:46,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:54:46,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:54:46,823 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-16T18:54:46,823 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-16T18:54:46,823 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:54:46,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:46,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:54:46,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:54:46,825 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-16T18:54:46,825 INFO [RS:0;7a6e4bf70377:42217 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-16T18:54:46,826 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,826 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:54:46,826 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-16T18:54:46,826 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:46,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:54:46,827 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:54:46,827 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:54:46,827 DEBUG [RS:0;7a6e4bf70377:42217 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:54:46,828 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740
2024-12-16T18:54:46,828 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,828 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,828 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,828 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,828 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42217,1734375286332-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T18:54:46,829 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T18:54:46,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:54:46,836 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:54:46,836 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820816, jitterRate=0.04372183978557587}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:54:46,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:54:46,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:54:46,836 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:54:46,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:54:46,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:54:46,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:54:46,837 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:54:46,837 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:54:46,838 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:54:46,838 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-16T18:54:46,838 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-16T18:54:46,839 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-16T18:54:46,840 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-16T18:54:46,848 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-16T18:54:46,848 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42217,1734375286332-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:46,860 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.Replication(204): 7a6e4bf70377,42217,1734375286332 started
2024-12-16T18:54:46,861 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,42217,1734375286332, RpcServer on 7a6e4bf70377/172.17.0.2:42217, sessionid=0x100e454a4490001
2024-12-16T18:54:46,861 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-16T18:54:46,861 DEBUG [RS:0;7a6e4bf70377:42217 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:46,861 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,42217,1734375286332'
2024-12-16T18:54:46,861 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-16T18:54:46,861 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-16T18:54:46,862 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-16T18:54:46,862 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-16T18:54:46,862 DEBUG [RS:0;7a6e4bf70377:42217 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:46,862 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,42217,1734375286332'
2024-12-16T18:54:46,862 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-16T18:54:46,862 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-16T18:54:46,862 DEBUG [RS:0;7a6e4bf70377:42217 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-16T18:54:46,862 INFO [RS:0;7a6e4bf70377:42217 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-16T18:54:46,862 INFO [RS:0;7a6e4bf70377:42217 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-16T18:54:46,965 INFO [RS:0;7a6e4bf70377:42217 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C42217%2C1734375286332, suffix=, logDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332, archiveDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/oldWALs, maxLogs=32
2024-12-16T18:54:46,966 INFO [RS:0;7a6e4bf70377:42217 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42217%2C1734375286332.1734375286966
2024-12-16T18:54:46,971 INFO [RS:0;7a6e4bf70377:42217 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966
2024-12-16T18:54:46,971 DEBUG [RS:0;7a6e4bf70377:42217 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34739:34739),(127.0.0.1/127.0.0.1:38287:38287)]
2024-12-16T18:54:46,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:54:46,990 DEBUG [7a6e4bf70377:36945 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-16T18:54:46,990 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:46,992 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,42217,1734375286332, state=OPENING
2024-12-16T18:54:47,008 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-16T18:54:47,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:47,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:54:47,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7a6e4bf70377,42217,1734375286332}]
2024-12-16T18:54:47,055 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:54:47,055 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:54:47,211 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:47,211 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-16T18:54:47,213 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33604, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-16T18:54:47,217 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-16T18:54:47,217 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:54:47,220 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C42217%2C1734375286332.meta, suffix=.meta, logDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332, archiveDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/oldWALs, maxLogs=32
2024-12-16T18:54:47,221 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta
2024-12-16T18:54:47,233 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta
2024-12-16T18:54:47,234 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34739:34739),(127.0.0.1/127.0.0.1:38287:38287)]
2024-12-16T18:54:47,234 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:54:47,234 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-16T18:54:47,234 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-16T18:54:47,234 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-16T18:54:47,234 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-16T18:54:47,235 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:54:47,235 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-16T18:54:47,235 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-16T18:54:47,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:54:47,237 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:54:47,237 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:47,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:54:47,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:54:47,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:54:47,238 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:47,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:54:47,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:54:47,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:54:47,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:47,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:54:47,241 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740
2024-12-16T18:54:47,242 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740
2024-12-16T18:54:47,244 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T18:54:47,245 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:54:47,246 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764723, jitterRate=-0.027605026960372925}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:54:47,247 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:54:47,248 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734375287211
2024-12-16T18:54:47,250 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-16T18:54:47,250 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-16T18:54:47,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:54:47,251 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42217,1734375286332
2024-12-16T18:54:47,251 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,42217,1734375286332, state=OPEN
2024-12-16T18:54:47,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:54:47,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:54:47,283 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:54:47,283 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:54:47,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-16T18:54:47,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7a6e4bf70377,42217,1734375286332 in 228 msec
2024-12-16T18:54:47,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-16T18:54:47,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 447 msec
2024-12-16T18:54:47,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 548 msec
2024-12-16T18:54:47,289 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734375287289, completionTime=-1
2024-12-16T18:54:47,289 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-16T18:54:47,290 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-16T18:54:47,290 DEBUG [hconnection-0x31a6597-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:54:47,292 INFO [RS-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33614, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:54:47,293 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-16T18:54:47,293 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734375347293
2024-12-16T18:54:47,293 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734375407293
2024-12-16T18:54:47,293 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec
2024-12-16T18:54:47,317 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36945,1734375286183-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:47,317 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36945,1734375286183-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:47,317 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36945,1734375286183-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:47,317 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a6e4bf70377:36945, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:47,317 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-16T18:54:47,318 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-16T18:54:47,318 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:54:47,319 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-16T18:54:47,319 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-16T18:54:47,321 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-16T18:54:47,321 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:54:47,322 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-16T18:54:47,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:54:47,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:54:47,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:54:47,331 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e2b0fc1437b97a0dc1f54092591722bf, NAME => 'hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89
2024-12-16T18:54:47,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:54:47,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:54:47,339 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:54:47,339 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing e2b0fc1437b97a0dc1f54092591722bf, disabling compactions & flushes
2024-12-16T18:54:47,339 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.
2024-12-16T18:54:47,339 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.
2024-12-16T18:54:47,339 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. after waiting 0 ms
2024-12-16T18:54:47,339 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.
2024-12-16T18:54:47,339 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.
2024-12-16T18:54:47,339 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for e2b0fc1437b97a0dc1f54092591722bf: 2024-12-16T18:54:47,341 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T18:54:47,341 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734375287341"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375287341"}]},"ts":"1734375287341"} 2024-12-16T18:54:47,344 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T18:54:47,345 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T18:54:47,345 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375287345"}]},"ts":"1734375287345"} 2024-12-16T18:54:47,348 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-16T18:54:47,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e2b0fc1437b97a0dc1f54092591722bf, ASSIGN}] 2024-12-16T18:54:47,368 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e2b0fc1437b97a0dc1f54092591722bf, ASSIGN 2024-12-16T18:54:47,369 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=e2b0fc1437b97a0dc1f54092591722bf, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42217,1734375286332; forceNewPlan=false, retain=false 2024-12-16T18:54:47,519 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e2b0fc1437b97a0dc1f54092591722bf, regionState=OPENING, regionLocation=7a6e4bf70377,42217,1734375286332 2024-12-16T18:54:47,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure e2b0fc1437b97a0dc1f54092591722bf, server=7a6e4bf70377,42217,1734375286332}] 2024-12-16T18:54:47,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:47,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:47,549 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:47,549 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:47,552 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:47,552 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:47,553 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:47,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:47,674 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42217,1734375286332 2024-12-16T18:54:47,678 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. 2024-12-16T18:54:47,678 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => e2b0fc1437b97a0dc1f54092591722bf, NAME => 'hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.', STARTKEY => '', ENDKEY => ''} 2024-12-16T18:54:47,679 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:54:47,679 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:54:47,679 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:54:47,679 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:54:47,681 INFO [StoreOpener-e2b0fc1437b97a0dc1f54092591722bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:54:47,682 INFO [StoreOpener-e2b0fc1437b97a0dc1f54092591722bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2b0fc1437b97a0dc1f54092591722bf columnFamilyName info 2024-12-16T18:54:47,682 DEBUG [StoreOpener-e2b0fc1437b97a0dc1f54092591722bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:54:47,683 INFO [StoreOpener-e2b0fc1437b97a0dc1f54092591722bf-1 {}] regionserver.HStore(327): Store=e2b0fc1437b97a0dc1f54092591722bf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:54:47,684 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:54:47,684 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:54:47,687 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:54:47,689 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T18:54:47,690 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened e2b0fc1437b97a0dc1f54092591722bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857455, jitterRate=0.09031052887439728}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T18:54:47,691 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for e2b0fc1437b97a0dc1f54092591722bf: 2024-12-16T18:54:47,692 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf., pid=6, masterSystemTime=1734375287674 2024-12-16T18:54:47,695 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. 
2024-12-16T18:54:47,695 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. 2024-12-16T18:54:47,696 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e2b0fc1437b97a0dc1f54092591722bf, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42217,1734375286332 2024-12-16T18:54:47,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-16T18:54:47,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure e2b0fc1437b97a0dc1f54092591722bf, server=7a6e4bf70377,42217,1734375286332 in 177 msec 2024-12-16T18:54:47,705 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-16T18:54:47,705 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=e2b0fc1437b97a0dc1f54092591722bf, ASSIGN in 335 msec 2024-12-16T18:54:47,706 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T18:54:47,706 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375287706"}]},"ts":"1734375287706"} 2024-12-16T18:54:47,710 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-16T18:54:47,728 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-16T18:54:47,729 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T18:54:47,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 411 msec 2024-12-16T18:54:47,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:54:47,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-16T18:54:47,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:54:47,742 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-16T18:54:47,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T18:54:47,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 26 msec 2024-12-16T18:54:47,774 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-16T18:54:47,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T18:54:47,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 28 msec 2024-12-16T18:54:47,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-16T18:54:47,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-16T18:54:47,841 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.463sec 2024-12-16T18:54:47,842 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-16T18:54:47,842 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-16T18:54:47,842 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-16T18:54:47,842 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-16T18:54:47,842 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-16T18:54:47,842 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36945,1734375286183-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T18:54:47,842 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36945,1734375286183-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-16T18:54:47,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-16T18:54:47,844 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-16T18:54:47,844 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,36945,1734375286183-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-16T18:54:47,857 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x334edb69 to 127.0.0.1:63223 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@271a821c 2024-12-16T18:54:47,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53533cfe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T18:54:47,869 DEBUG [hconnection-0x202a5eea-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T18:54:47,871 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33618, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T18:54:47,872 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7a6e4bf70377,36945,1734375286183 2024-12-16T18:54:47,872 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:54:47,876 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-16T18:54:47,876 INFO [Time-limited test {}] wal.TestLogRolling(297): Starting testLogRollOnPipelineRestart 2024-12-16T18:54:47,876 INFO [Time-limited test {}] wal.TestLogRolling(300): Replication=2 2024-12-16T18:54:47,877 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T18:54:47,879 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34392, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T18:54:47,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-16T18:54:47,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-16T18:54:47,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T18:54:47,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-16T18:54:47,884 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T18:54:47,884 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:54:47,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 9 2024-12-16T18:54:47,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T18:54:47,885 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T18:54:47,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741837_1013 (size=395) 2024-12-16T18:54:47,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741837_1013 (size=395) 2024-12-16T18:54:47,894 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 62192ff48e0ae8daf8449bd6a65d311e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89 2024-12-16T18:54:47,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35693 is added to blk_1073741838_1014 (size=78) 2024-12-16T18:54:47,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741838_1014 (size=78) 2024-12-16T18:54:47,903 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:54:47,903 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1681): Closing 62192ff48e0ae8daf8449bd6a65d311e, disabling compactions & flushes 2024-12-16T18:54:47,903 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. 2024-12-16T18:54:47,903 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. 2024-12-16T18:54:47,903 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. after waiting 0 ms 2024-12-16T18:54:47,903 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. 2024-12-16T18:54:47,903 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. 2024-12-16T18:54:47,903 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1635): Region close journal for 62192ff48e0ae8daf8449bd6a65d311e: 2024-12-16T18:54:47,904 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T18:54:47,904 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1734375287904"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375287904"}]},"ts":"1734375287904"} 2024-12-16T18:54:47,906 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-16T18:54:47,907 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T18:54:47,908 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375287907"}]},"ts":"1734375287907"} 2024-12-16T18:54:47,909 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-16T18:54:47,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=62192ff48e0ae8daf8449bd6a65d311e, ASSIGN}] 2024-12-16T18:54:47,946 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=62192ff48e0ae8daf8449bd6a65d311e, ASSIGN 2024-12-16T18:54:47,947 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=62192ff48e0ae8daf8449bd6a65d311e, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42217,1734375286332; forceNewPlan=false, retain=false 2024-12-16T18:54:47,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:48,069 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-16T18:54:48,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:48,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:48,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:48,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:48,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:48,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:48,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:48,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:54:48,098 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=62192ff48e0ae8daf8449bd6a65d311e, regionState=OPENING, regionLocation=7a6e4bf70377,42217,1734375286332 2024-12-16T18:54:48,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 62192ff48e0ae8daf8449bd6a65d311e, server=7a6e4bf70377,42217,1734375286332}] 2024-12-16T18:54:48,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:48,254 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42217,1734375286332 2024-12-16T18:54:48,257 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. 2024-12-16T18:54:48,258 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 62192ff48e0ae8daf8449bd6a65d311e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.', STARTKEY => '', ENDKEY => ''} 2024-12-16T18:54:48,258 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 62192ff48e0ae8daf8449bd6a65d311e 2024-12-16T18:54:48,258 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:54:48,258 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 62192ff48e0ae8daf8449bd6a65d311e 2024-12-16T18:54:48,258 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 62192ff48e0ae8daf8449bd6a65d311e 2024-12-16T18:54:48,259 INFO [StoreOpener-62192ff48e0ae8daf8449bd6a65d311e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 62192ff48e0ae8daf8449bd6a65d311e 2024-12-16T18:54:48,261 INFO 
[StoreOpener-62192ff48e0ae8daf8449bd6a65d311e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 62192ff48e0ae8daf8449bd6a65d311e columnFamilyName info 2024-12-16T18:54:48,261 DEBUG [StoreOpener-62192ff48e0ae8daf8449bd6a65d311e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:54:48,262 INFO [StoreOpener-62192ff48e0ae8daf8449bd6a65d311e-1 {}] regionserver.HStore(327): Store=62192ff48e0ae8daf8449bd6a65d311e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:54:48,262 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e 2024-12-16T18:54:48,263 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e 2024-12-16T18:54:48,265 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 62192ff48e0ae8daf8449bd6a65d311e 2024-12-16T18:54:48,267 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T18:54:48,267 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 62192ff48e0ae8daf8449bd6a65d311e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830350, jitterRate=0.055845752358436584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T18:54:48,268 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 62192ff48e0ae8daf8449bd6a65d311e: 2024-12-16T18:54:48,269 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e., pid=11, masterSystemTime=1734375288253 2024-12-16T18:54:48,271 DEBUG 
[RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. 2024-12-16T18:54:48,271 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. 2024-12-16T18:54:48,272 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=62192ff48e0ae8daf8449bd6a65d311e, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42217,1734375286332 2024-12-16T18:54:48,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-16T18:54:48,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 62192ff48e0ae8daf8449bd6a65d311e, server=7a6e4bf70377,42217,1734375286332 in 172 msec 2024-12-16T18:54:48,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-16T18:54:48,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=62192ff48e0ae8daf8449bd6a65d311e, ASSIGN in 330 msec 2024-12-16T18:54:48,277 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T18:54:48,278 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375288278"}]},"ts":"1734375288278"} 2024-12-16T18:54:48,279 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-16T18:54:48,321 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T18:54:48,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 440 msec 2024-12-16T18:54:48,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:48,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-16T18:54:49,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:54:49,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:54:49,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:54:50,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:54:50,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:54:50,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:54:51,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:54:51,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:54:51,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:54:52,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:54:52,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:54:52,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-16T18:54:52,531 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-12-16T18:54:52,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-16T18:54:52,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-12-16T18:54:52,533 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-16T18:54:52,533 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-16T18:54:52,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:54:53,181 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-16T18:54:53,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:54:53,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:54:53,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:54:53,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:54:53,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:54:53,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:54:53,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:54:53,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:54:53,228 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-16T18:54:53,229 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-12-16T18:54:53,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:53,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:53,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:54,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:54,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:54,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:55,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:54:55,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:55,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:56,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:56,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:56,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:57,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:57,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T18:54:57,887 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart, procId: 9 completed 2024-12-16T18:54:57,890 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-16T18:54:57,890 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. 2024-12-16T18:54:57,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:58,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:54:58,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:54:58,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:54:59,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:54:59,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:54:59,896 INFO [Time-limited test {}] wal.TestLogRolling(337): log.getCurrentFileName()): hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966
2024-12-16T18:54:59,898 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009
java.io.IOException: Bad response ERROR for BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:40047,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:59,898 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:59,898 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010
java.io.IOException: Bad response ERROR for BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40047,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:54:59,898 WARN [DataStreamer for file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375286459 block BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40047,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK], DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40047,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]) is bad.
2024-12-16T18:54:59,898 WARN [DataStreamer for file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta block BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK], DatanodeInfoWithStorage[127.0.0.1:40047,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40047,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]) is bad.
2024-12-16T18:54:59,898 WARN [PacketResponder: BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40047] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
	at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
	at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
	at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
	at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
	at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
	at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:59,899 WARN [DataStreamer for file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 block BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK], DatanodeInfoWithStorage[127.0.0.1:40047,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40047,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]) is bad.
2024-12-16T18:54:59,899 WARN [PacketResponder: BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40047] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
	at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
	at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
	at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
	at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
	at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
	at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:59,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1995461247_22 at /127.0.0.1:44076 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40047:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44076 dst: /127.0.0.1:40047
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:59,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:39554 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39554 dst: /127.0.0.1:35693
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:59,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1995461247_22 at /127.0.0.1:39534 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39534 dst: /127.0.0.1:35693
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:59,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:39568 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39568 dst: /127.0.0.1:35693
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:59,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:44104 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40047:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44104 dst: /127.0.0.1:40047
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:59,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:44096 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40047:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44096 dst: /127.0.0.1:40047
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:54:59,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35a9519b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:54:59,928 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e942200{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:54:59,928 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:54:59,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56e5c03c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:54:59,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74c5d2c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,STOPPED}
2024-12-16T18:54:59,950 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:54:59,950 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:54:59,950 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-23588281-172.17.0.2-1734375284143 (Datanode Uuid 6fa5ce46-9b4e-48af-8e8f-22651392c095) service to localhost/127.0.0.1:35803
2024-12-16T18:54:59,950 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:54:59,951 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data3/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:54:59,951 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:54:59,954 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data4/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:54:59,969 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:54:59,975 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:54:59,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:54:59,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:54:59,976 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T18:54:59,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f9fb1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:54:59,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@477be5b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:54:59,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:55:00,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43c59d2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/java.io.tmpdir/jetty-localhost-45639-hadoop-hdfs-3_4_1-tests_jar-_-any-4573212058947193644/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:00,111 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2595e78b{HTTP/1.1, (http/1.1)}{localhost:45639}
2024-12-16T18:55:00,111 INFO [Time-limited test {}] server.Server(415): Started @204477ms
2024-12-16T18:55:00,113 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:55:00,164 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1015
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:00,164 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1017
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:00,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:53592 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53592 dst: /127.0.0.1:35693
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:00,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:53576 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53576 dst: /127.0.0.1:35693
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:00,166 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1016
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:00,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1995461247_22 at /127.0.0.1:53600 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35693:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53600 dst: /127.0.0.1:35693
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:00,176 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62b56978{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:00,177 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@778bcc60{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:55:00,177 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:55:00,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b0c3c75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:55:00,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fd029e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,STOPPED}
2024-12-16T18:55:00,180 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:55:00,180 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:55:00,182 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:55:00,182 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-23588281-172.17.0.2-1734375284143 (Datanode Uuid 75933a06-b540-4670-be91-29ae3cce9c45) service to localhost/127.0.0.1:35803
2024-12-16T18:55:00,183 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data2/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:00,183 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data1/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:00,184 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:55:00,203 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:55:00,210 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:55:00,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:55:00,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:55:00,226 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T18:55:00,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19d2efc1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:55:00,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2532d298{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:55:00,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:55:00,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:55:00,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 after 68079ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:00,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7faf5209{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/java.io.tmpdir/jetty-localhost-38993-hadoop-hdfs-3_4_1-tests_jar-_-any-13823486514372644331/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:00,370 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ddf655d{HTTP/1.1, (http/1.1)}{localhost:38993}
2024-12-16T18:55:00,371 INFO [Time-limited test {}] server.Server(415): Started @204737ms
2024-12-16T18:55:00,373 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:55:00,591 WARN [Thread-1102 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:55:00,608 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab4d307bc22b18be with lease ID 0x486fa5d1bc775422: from storage DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155 node DatanodeRegistration(127.0.0.1:35497, datanodeUuid=6fa5ce46-9b4e-48af-8e8f-22651392c095, infoPort=46457, infoSecurePort=0, ipcPort=39823, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-16T18:55:00,611 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab4d307bc22b18be with lease ID 0x486fa5d1bc775422: from storage DS-b24d1b6d-4adf-420e-9378-ad881bc69c00 node DatanodeRegistration(127.0.0.1:35497, datanodeUuid=6fa5ce46-9b4e-48af-8e8f-22651392c095, infoPort=46457, infoSecurePort=0, ipcPort=39823, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:00,891 WARN [Thread-1123 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:55:00,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64c9094d91cc6568 with lease ID 0x486fa5d1bc775423: from storage DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4 node DatanodeRegistration(127.0.0.1:35091, datanodeUuid=75933a06-b540-4670-be91-29ae3cce9c45, infoPort=44741, infoSecurePort=0, ipcPort=37339, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:00,899 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64c9094d91cc6568 with lease ID 0x486fa5d1bc775423: from storage DS-ecb495d2-4e3e-4c19-932c-a0069e100e4e node DatanodeRegistration(127.0.0.1:35091, datanodeUuid=75933a06-b540-4670-be91-29ae3cce9c45, infoPort=44741, infoSecurePort=0, ipcPort=37339, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:00,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:55:01,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:55:01,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:01,444 INFO [Time-limited test {}] wal.TestLogRolling(349): Data Nodes restarted 2024-12-16T18:55:01,447 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-16T18:55:01,450 WARN [RS:0;7a6e4bf70377:42217.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=5, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:01,450 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C42217%2C1734375286332:(num 1734375286966) roll requested 2024-12-16T18:55:01,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42217 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:01,450 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42217%2C1734375286332.1734375301450 2024-12-16T18:55:01,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42217 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33618 deadline: 1734375311449, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-16T18:55:01,456 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 newFile=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 2024-12-16T18:55:01,457 WARN [regionserver/7a6e4bf70377:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-16T18:55:01,457 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 with entries=5, filesize=2.09 KB; new WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 2024-12-16T18:55:01,457 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44741:44741),(127.0.0.1/127.0.0.1:46457:46457)] 2024-12-16T18:55:01,457 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 is not closed yet, will try archiving it next time 2024-12-16T18:55:01,457 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:01,457 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:01,457 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 2024-12-16T18:55:01,458 WARN [IPC Server handler 0 on default port 35803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1015 2024-12-16T18:55:01,458 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 after 1ms 2024-12-16T18:55:01,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35091 is added to blk_1073741833_1019 (size=2136) 2024-12-16T18:55:01,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:02,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:02,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:02,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:03,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:03,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:03,604 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-16T18:55:03,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:04,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:55:04,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:04,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:05,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:05,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:05,459 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 after 4002ms 2024-12-16T18:55:05,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:06,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:06,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:06,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:07,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[The identical InvocationTargetException stack trace, with the same "Caused by: java.io.IOException: Filesystem closed" chain, accompanies every retry warning below; only the timestamp and WAL path change.]
2024-12-16T18:55:07,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:07,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:08,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:08,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:08,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:09,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:09,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:09,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:10,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:10,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:11,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:11,015 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region f41842edafcf2308940ff2b22b3c218f, had cached 0 bytes from a total of 23930
2024-12-16T18:55:11,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:11,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:12,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:12,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:12,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:13,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:13,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:13,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:13,500 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003
2024-12-16T18:55:14,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:14,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:14,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:15,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:15,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:15,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
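The timestamps above show RecoverLeaseFSUtils polling each WAL file roughly once per second, and the frames show the check going through java.lang.reflect.Method.invoke, so the checked java.io.IOException("Filesystem closed") necessarily reaches the caller wrapped in an InvocationTargetException, which is exactly the "null ... Caused by:" pattern logged. A minimal sketch of that wrap-and-retry shape (hypothetical; the class and method names below are illustrative, not the HBase source):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    // Sketch only: invoking isFileClosed(Path) reflectively means any
    // IOException it throws surfaces as the *cause* of an
    // InvocationTargetException, which the caller logs and retries.
    final class IsFileClosedProbe {
        static boolean probe(Object dfs, Method isFileClosed, Object path) {
            try {
                return (Boolean) isFileClosed.invoke(dfs, path);
            } catch (InvocationTargetException e) {
                // The real failure (here: "Filesystem closed") is e.getCause();
                // the poll loop logs it and tries again about once per second.
                System.err.println("Failed invocation: " + e.getCause());
                return false;
            } catch (IllegalAccessException e) {
                throw new IllegalStateException(e);
            }
        }
    }

Because the cause is "Filesystem closed" (the DFSClient itself has been shut down), these retries can never succeed; the loop simply keeps firing until its own timeout or the test harness tears it down.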
2024-12-16T18:55:15,504 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:15,504 WARN [DataStreamer for file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 block BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35091,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK], DatanodeInfoWithStorage[127.0.0.1:35497,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35091,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]) is bad.
2024-12-16T18:55:15,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:52180 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52180 dst: /127.0.0.1:35091
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:15,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:37572 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35497:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37572 dst: /127.0.0.1:35497
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:15,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7faf5209{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:15,515 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ddf655d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:55:15,516 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:55:15,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2532d298{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:55:15,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19d2efc1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,STOPPED}
2024-12-16T18:55:15,523 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:55:15,523 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:55:15,523 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:55:15,523 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-23588281-172.17.0.2-1734375284143 (Datanode Uuid 75933a06-b540-4670-be91-29ae3cce9c45) service to localhost/127.0.0.1:35803
2024-12-16T18:55:15,524 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data1/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:15,524 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data2/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:15,524 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:55:15,536 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:55:15,540 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:55:15,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:55:15,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:55:15,542 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T18:55:15,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b7f9e35{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:55:15,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29c5d5b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:55:15,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d23c9d7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/java.io.tmpdir/jetty-localhost-45225-hadoop-hdfs-3_4_1-tests_jar-_-any-2454866866723190992/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:15,641 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f5ef35{HTTP/1.1, (http/1.1)}{localhost:45225}
2024-12-16T18:55:15,641 INFO [Time-limited test {}] server.Server(415): Started @220007ms
2024-12-16T18:55:15,643 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:55:15,674 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1020
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:15,674 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1345949637_22 at /127.0.0.1:47414 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35497:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47414 dst: /127.0.0.1:35497
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:15,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43c59d2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:15,679 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2595e78b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:55:15,679 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:55:15,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@477be5b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:55:15,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f9fb1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,STOPPED}
2024-12-16T18:55:15,680 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:55:15,680 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:55:15,680 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-23588281-172.17.0.2-1734375284143 (Datanode Uuid 6fa5ce46-9b4e-48af-8e8f-22651392c095) service to localhost/127.0.0.1:35803
2024-12-16T18:55:15,680 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:55:15,681 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data3/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:15,681 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data4/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:15,682 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:55:15,691 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:55:15,695 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:55:15,696 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:55:15,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:55:15,697 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:55:15,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@879f137{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:55:15,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2241922f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:55:15,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@355d3e72{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/java.io.tmpdir/jetty-localhost-45023-hadoop-hdfs-3_4_1-tests_jar-_-any-8717968345296114883/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:15,803 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@280885c6{HTTP/1.1, (http/1.1)}{localhost:45023}
2024-12-16T18:55:15,804 INFO [Time-limited test {}] server.Server(415): Started @220170ms
2024-12-16T18:55:15,805 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:55:16,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:16,157 WARN [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(624): failed to reset thread pool timeout for FsDatasetAsyncDiskService
java.lang.NullPointerException: Cannot invoke "Object.getClass()" because "dataset" is null
    at org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:609) ~[test-classes/:2.7.0-SNAPSHOT]
2024-12-16T18:55:16,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:16,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:16,384 WARN [Thread-1177 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:55:16,386 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53422feecf619482 with lease ID 0x486fa5d1bc775424: from storage DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4 node DatanodeRegistration(127.0.0.1:41991, datanodeUuid=75933a06-b540-4670-be91-29ae3cce9c45, infoPort=39667, infoSecurePort=0, ipcPort=42551, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:16,387 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53422feecf619482 with lease ID 0x486fa5d1bc775424: from storage DS-ecb495d2-4e3e-4c19-932c-a0069e100e4e node DatanodeRegistration(127.0.0.1:41991, datanodeUuid=75933a06-b540-4670-be91-29ae3cce9c45, infoPort=39667, infoSecurePort=0, ipcPort=42551, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:16,448 WARN [Thread-1197 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:55:16,450 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b4ff66a42dc9bca with lease ID 0x486fa5d1bc775425: from storage DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155 node DatanodeRegistration(127.0.0.1:39711, datanodeUuid=6fa5ce46-9b4e-48af-8e8f-22651392c095, infoPort=35043, infoSecurePort=0, ipcPort=34963, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:16,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b4ff66a42dc9bca with lease ID 0x486fa5d1bc775425: from storage DS-b24d1b6d-4adf-420e-9378-ad881bc69c00 node DatanodeRegistration(127.0.0.1:39711, datanodeUuid=6fa5ce46-9b4e-48af-8e8f-22651392c095, infoPort=35043, infoSecurePort=0, ipcPort=34963, storageInfo=lv=-57;cid=testClusterID;nsid=952592992;c=1734375284143), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:16,747 WARN [master/7a6e4bf70377:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=95, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,748 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C36945%2C1734375286183:(num 1734375286459) roll requested
2024-12-16T18:55:16,748 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C36945%2C1734375286183.1734375316748
2024-12-16T18:55:16,748 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,748 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
    at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,767 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
2024-12-16T18:55:16,767 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375286459 with entries=92, filesize=46.00 KB; new WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375316748
2024-12-16T18:55:16,768 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39667:39667),(127.0.0.1/127.0.0.1:35043:35043)]
2024-12-16T18:55:16,768 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375286459 is not closed yet, will try archiving it next time
2024-12-16T18:55:16,769 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,769 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,769 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375286459
2024-12-16T18:55:16,770 WARN [IPC Server handler 4 on default port 35803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375286459 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741830_1016
2024-12-16T18:55:16,770 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375286459 after 1ms
2024-12-16T18:55:16,834 INFO [Time-limited test {}] wal.TestLogRolling(366): Data Nodes restarted
2024-12-16T18:55:16,842 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004
2024-12-16T18:55:16,843 WARN [RS:0;7a6e4bf70377:42217.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=8, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35497,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,844 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C42217%2C1734375286332:(num 1734375301450) roll requested
2024-12-16T18:55:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42217 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35497,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,844 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42217%2C1734375286332.1734375316844
2024-12-16T18:55:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42217 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33618 deadline: 1734375326843, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
2024-12-16T18:55:16,859 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 newFile=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844
2024-12-16T18:55:16,859 WARN [regionserver/7a6e4bf70377:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
2024-12-16T18:55:16,859 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844
2024-12-16T18:55:16,859 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35043:35043),(127.0.0.1/127.0.0.1:39667:39667)]
2024-12-16T18:55:16,859 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 is not closed yet, will try archiving it next time
2024-12-16T18:55:16,859 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35497,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,859 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35497,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-16T18:55:16,860 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450
2024-12-16T18:55:16,860 WARN [IPC Server handler 4 on default port 35803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741839_1020
2024-12-16T18:55:16,860 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 after 0ms
2024-12-16T18:55:17,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:17,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 after 68058ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:17,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:17,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:18,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:18,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:18,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:18,387 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741839_1020: GenerationStamp not matched, existing replica is blk_1073741839_1018 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-16T18:55:19,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-16T18:55:19,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:19,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:20,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:20,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:20,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:20,771 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183/7a6e4bf70377%2C36945%2C1734375286183.1734375286459 after 4002ms
2024-12-16T18:55:20,802 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:55:20,804 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:55:20,861 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 after 4001ms
2024-12-16T18:55:21,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
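[Editor's note] The two "Recovered lease, attempt=1 ... after ~4000ms" INFO lines show the recovery loop succeeding against the filesystem on port 35803, while the once-per-second WARN cadence above is the same loop retrying against the 35231 client that has already been shut down. A hedged sketch of such a recover-then-poll loop using the public DistributedFileSystem API; the timeout and the one-second interval are illustrative assumptions, not HBase's actual tuning:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
    public static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs)
            throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            // Ask the NameNode to start (or re-trigger) lease recovery;
            // returns true once the file is closed and the lease released.
            if (dfs.recoverLease(wal)) {
                return true;
            }
            // Poll until block recovery completes, roughly matching the
            // once-per-second WARN cadence seen in the log above.
            Thread.sleep(1000L);
            try {
                if (dfs.isFileClosed(wal)) {
                    return true;
                }
            } catch (IOException probeFailed) {
                // e.g. "Filesystem closed" when the client was shut down;
                // the utility in the log records this and keeps retrying.
            }
        }
        return false;
    }
}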
2024-12-16T18:55:21,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:21,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:21,450 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 2) replica(s): 0) Failed to delete replica blk_1073741830_1016: GenerationStamp not matched, existing replica is blk_1073741830_1006
2024-12-16T18:55:22,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
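[Editor's note] The datanode refuses this delete because the invalidate command names generation stamp 1016 for blk_1073741830 while the on-disk replica still carries stamp 1006: a mismatched stamp means the command refers to a different version of the block than the replica this node holds, so deleting would be unsafe. An illustrative sketch of that guard under simplified, hypothetical types (not the FsDatasetImpl internals):

import java.io.IOException;

public final class InvalidateGuardSketch {
    // Simplified stand-in for an on-disk replica record.
    record Replica(long blockId, long generationStamp) {}

    static void invalidate(Replica onDisk, long requestedGenStamp) throws IOException {
        if (onDisk.generationStamp() != requestedGenStamp) {
            // Refuse: the command targets a block version we do not have.
            throw new IOException("Failed to delete replica blk_" + onDisk.blockId()
                + "_" + requestedGenStamp + ": GenerationStamp not matched, existing replica is blk_"
                + onDisk.blockId() + "_" + onDisk.generationStamp());
        }
        // ... otherwise remove the replica's block and meta files here ...
    }

    public static void main(String[] args) {
        try {
            // The command names stamp 1016, but this node still holds the
            // 1006 replica (the mismatch reported in the log above).
            invalidate(new Replica(1073741830L, 1006L), 1016L);
        } catch (IOException e) {
            System.out.println(e.getMessage());
        }
    }
}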
2024-12-16T18:55:22,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:22,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:23,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:23,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:23,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:24,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:24,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:24,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:25,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:25,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:25,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:26,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:26,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:26,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:27,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:27,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:55:27,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:55:27,881 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-16T18:55:27,881 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-16T18:55:28,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:55:28,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:28,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:28,902 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:28,907 DEBUG [Time-limited test {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844 newFile=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:28,908 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:28,908 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39667:39667),(127.0.0.1/127.0.0.1:35043:35043)] 2024-12-16T18:55:28,909 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844 is not closed yet, will try archiving it next time 2024-12-16T18:55:28,909 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 2024-12-16T18:55:28,909 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 2024-12-16T18:55:28,909 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 after 0ms 2024-12-16T18:55:28,909 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 2024-12-16T18:55:28,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741841_1023 (size=1264) 2024-12-16T18:55:28,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741841_1023 (size=1264) 2024-12-16T18:55:28,919 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1734375287691/Put/vlen=162/seqid=0] 2024-12-16T18:55:28,919 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [default/info:d/1734375287748/Put/vlen=9/seqid=0] 2024-12-16T18:55:28,919 
DEBUG [Time-limited test {}] wal.TestLogRolling(389): #5: [hbase/info:d/1734375287780/Put/vlen=7/seqid=0] 2024-12-16T18:55:28,919 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1734375288268/Put/vlen=218/seqid=0] 2024-12-16T18:55:28,920 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [row1002/info:/1734375297894/Put/vlen=1045/seqid=0] 2024-12-16T18:55:28,920 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375286966 2024-12-16T18:55:28,920 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 2024-12-16T18:55:28,920 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 2024-12-16T18:55:28,921 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 after 1ms 2024-12-16T18:55:28,921 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 2024-12-16T18:55:28,924 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #6: [row1003/info:/1734375311487/Put/vlen=1045/seqid=0] 2024-12-16T18:55:28,925 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #7: [row1004/info:/1734375313501/Put/vlen=1045/seqid=0] 2024-12-16T18:55:28,925 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375301450 2024-12-16T18:55:28,925 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844 2024-12-16T18:55:28,925 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844 2024-12-16T18:55:28,925 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844 after 0ms 2024-12-16T18:55:28,925 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375316844 2024-12-16T18:55:28,928 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #9: [row1005/info:/1734375326900/Put/vlen=1045/seqid=0] 2024-12-16T18:55:28,929 DEBUG [Time-limited test {}] 
wal.TestLogRolling(380): recovering lease for hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:28,929 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:28,929 WARN [IPC Server handler 4 on default port 35803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741842_1025 2024-12-16T18:55:28,929 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 after 0ms 2024-12-16T18:55:29,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:55:29,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:29,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:29,471 WARN [ResponseProcessor for block BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:29,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1995461247_22 at /127.0.0.1:36984 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:41991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36984 dst: /127.0.0.1:41991 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41991 remote=/127.0.0.1:36984]. Total timeout mills is 60000, 59436 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:55:29,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1995461247_22 at /127.0.0.1:35182 [Receiving block BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:39711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35182 dst: /127.0.0.1:39711 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:55:29,471 WARN [DataStreamer for file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 block BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41991,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK], DatanodeInfoWithStorage[127.0.0.1:39711,DS-dac4ad17-b1f2-4c35-8782-fc5a329b3155,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41991,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]) is bad. 
2024-12-16T18:55:29,476 WARN [DataStreamer for file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 block BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-16T18:55:29,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741842_1026 (size=85) 2024-12-16T18:55:30,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:30,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:30,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:55:31,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:31,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:31,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:32,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:32,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:32,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:55:32,930 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 after 4001ms 2024-12-16T18:55:32,930 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:32,934 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:32,934 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing e2b0fc1437b97a0dc1f54092591722bf 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-16T18:55:32,934 WARN [RS:0;7a6e4bf70377:42217.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=7, requesting roll of WAL org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:32,935 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C42217%2C1734375286332:(num 1734375328901) roll requested 2024-12-16T18:55:32,935 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for e2b0fc1437b97a0dc1f54092591722bf: 2024-12-16T18:55:32,935 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42217%2C1734375286332.1734375332935 2024-12-16T18:55:32,935 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:32,936 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.42 KB 2024-12-16T18:55:32,936 WARN [RS_OPEN_META-regionserver/7a6e4bf70377:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:32,936 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-16T18:55:32,936 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:32,936 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 62192ff48e0ae8daf8449bd6a65d311e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-16T18:55:32,937 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 62192ff48e0ae8daf8449bd6a65d311e: 2024-12-16T18:55:32,937 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
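The long runs of jdk.proxy2.$Proxy47.updateBlockForPipeline / org.apache.hadoop.hbase.fs.HFileSystem$1.invoke pairs in the traces above are not corruption: HFileSystem interposes a java.lang.reflect.Proxy in front of the HDFS client protocol, and when that pass-through wrapper ends up layered several times, the same proxy/invoke frame pair appears once per layer on every call. A minimal, self-contained Java sketch of how such frame stacking arises (the NameNodeCall interface and wrap() helper are hypothetical stand-ins, not HBase's actual HFileSystem code):

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Proxy;

    public class ProxyStackingDemo {
      // Hypothetical stand-in for the intercepted HDFS protocol method.
      interface NameNodeCall {
        void updateBlockForPipeline();
      }

      // Pass-through handler in the spirit of HFileSystem$1: it forwards
      // every invocation to the wrapped delegate unchanged.
      static NameNodeCall wrap(NameNodeCall delegate) {
        InvocationHandler handler = (proxy, method, args) -> {
          try {
            return method.invoke(delegate, args);
          } catch (InvocationTargetException e) {
            throw e.getCause(); // surface the delegate's real exception
          }
        };
        return (NameNodeCall) Proxy.newProxyInstance(
            NameNodeCall.class.getClassLoader(),
            new Class<?>[] { NameNodeCall.class }, handler);
      }

      public static void main(String[] args) {
        NameNodeCall base = () -> { throw new IllegalStateException("boom"); };
        // Wrapping an already-wrapped proxy stacks handlers; the exception's
        // stack trace then shows one $ProxyN/invoke frame pair per layer --
        // the same shape as the repeated frames in the log above.
        wrap(wrap(wrap(base))).updateBlockForPipeline();
      }
    }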
2024-12-16T18:55:32,940 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-16T18:55:32,940 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-16T18:55:32,940 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x334edb69 to 127.0.0.1:63223 2024-12-16T18:55:32,940 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T18:55:32,940 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-16T18:55:32,940 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=513184309, stopped=false 2024-12-16T18:55:32,940 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7a6e4bf70377,36945,1734375286183 2024-12-16T18:55:32,942 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 newFile=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375332935 2024-12-16T18:55:32,942 WARN [regionserver/7a6e4bf70377:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL 2024-12-16T18:55:32,942 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375332935 2024-12-16T18:55:32,942 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39667:39667),(127.0.0.1/127.0.0.1:35043:35043)] 2024-12-16T18:55:32,942 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 is not closed yet, will try archiving it next time 2024-12-16T18:55:32,942 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7a6e4bf70377%2C42217%2C1734375286332.meta:.meta(num 1734375287221) roll requested 2024-12-16T18:55:32,942 INFO [regionserver/7a6e4bf70377:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42217%2C1734375286332.meta.1734375332942.meta 2024-12-16T18:55:32,942 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:32,942 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-23588281-172.17.0.2-1734375284143:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-16T18:55:32,943 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:32,943 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 after 0ms 2024-12-16T18:55:32,944 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 to hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/oldWALs/7a6e4bf70377%2C42217%2C1734375286332.1734375328901 2024-12-16T18:55:32,949 WARN [regionserver/7a6e4bf70377:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-16T18:55:32,949 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta with entries=11, filesize=3.66 KB; new WAL /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375332942.meta 2024-12-16T18:55:32,949 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35043:35043),(127.0.0.1/127.0.0.1:39667:39667)] 2024-12-16T18:55:32,949 DEBUG [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta is not closed yet, will try archiving it next time 2024-12-16T18:55:32,949 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:32,949 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35693,DS-fabbc7c5-f24e-444a-b328-fdf94fee71c4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-16T18:55:32,950 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta 2024-12-16T18:55:32,950 WARN [IPC Server handler 1 on default port 35803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta has not been closed. Lease recovery is in progress. RecoveryId = 1029 for block blk_1073741834_1017 2024-12-16T18:55:32,950 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta after 0ms 2024-12-16T18:55:32,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T18:55:32,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T18:55:32,985 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-16T18:55:32,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:55:32,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:55:32,985 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T18:55:32,985 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T18:55:32,985 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T18:55:32,985 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,42217,1734375286332' ***** 2024-12-16T18:55:32,985 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-16T18:55:32,986 INFO 
[RS:0;7a6e4bf70377:42217 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-16T18:55:32,986 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(3579): Received CLOSE for e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(3579): Received CLOSE for 62192ff48e0ae8daf8449bd6a65d311e 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,42217,1734375286332 2024-12-16T18:55:32,986 DEBUG [RS:0;7a6e4bf70377:42217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T18:55:32,986 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing e2b0fc1437b97a0dc1f54092591722bf, disabling compactions & flushes 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-16T18:55:32,986 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-16T18:55:32,986 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. 2024-12-16T18:55:32,986 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-16T18:55:32,986 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. after waiting 0 ms 2024-12-16T18:55:32,986 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. 
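The util.RecoverLeaseFSUtils messages a little earlier ("Recover lease on dfs file ...", "Recovered lease, attempt=1 ... after 4001ms", "Failed to recover lease, attempt=0 ... after 0ms") trace the close path asking the NameNode to release the dead writer's lease on the old WAL, retrying until the file is reported closed. A simplified Java sketch of that polling loop against the public DistributedFileSystem API (the method shape, pause handling, and printouts are illustrative; the real utility also probes isFileClosed reflectively and waits longer before the first retry, consistent with the ~4s gap before attempt=1 above):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoveryLoop {
      // Repeatedly ask the NameNode to recover the lease on a WAL file.
      // recoverLease() returns true once the file is closed and the lease
      // released; false while block recovery is still in progress (compare
      // the NameNode's "Lease recovery is in progress. RecoveryId = ..."
      // warning in the log).
      static boolean recoverLease(DistributedFileSystem dfs, Path wal,
          int maxAttempts, long pauseMs) throws IOException, InterruptedException {
        long start = System.currentTimeMillis();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          if (dfs.recoverLease(wal)) {
            System.out.printf("Recovered lease, attempt=%d on file=%s after %dms%n",
                attempt, wal, System.currentTimeMillis() - start);
            return true;
          }
          System.out.printf("Failed to recover lease, attempt=%d on file=%s after %dms%n",
              attempt, wal, System.currentTimeMillis() - start);
          Thread.sleep(pauseMs); // give DataNode-side block recovery time to finish
        }
        return false;
      }
    }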
2024-12-16T18:55:32,987 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing e2b0fc1437b97a0dc1f54092591722bf 1/1 column families, dataSize=78 B heapSize=728 B 2024-12-16T18:55:32,987 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-16T18:55:32,987 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1603): Online Regions={e2b0fc1437b97a0dc1f54092591722bf=hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf., 1588230740=hbase:meta,,1.1588230740, 62192ff48e0ae8daf8449bd6a65d311e=TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.} 2024-12-16T18:55:32,987 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-16T18:55:32,987 WARN [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-16T18:55:32,987 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-16T18:55:32,987 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-16T18:55:32,987 DEBUG [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 62192ff48e0ae8daf8449bd6a65d311e, e2b0fc1437b97a0dc1f54092591722bf 2024-12-16T18:55:32,987 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-16T18:55:32,987 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-16T18:55:32,987 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.89 KB 2024-12-16T18:55:32,987 WARN [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-16T18:55:32,987 WARN [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 
2024-12-16T18:55:33,001 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf/.tmp/info/c3f1f26cf98c451d901dee1b5dcfd1f5 is 45, key is default/info:d/1734375287748/Put/seqid=0 2024-12-16T18:55:33,001 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/.tmp/info/6d854cdae2bc4627a28fe1bb933b1b8b is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e./info:regioninfo/1734375288271/Put/seqid=0 2024-12-16T18:55:33,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741845_1030 (size=8268) 2024-12-16T18:55:33,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741845_1030 (size=8268) 2024-12-16T18:55:33,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741846_1031 (size=5037) 2024-12-16T18:55:33,008 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.66 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/.tmp/info/6d854cdae2bc4627a28fe1bb933b1b8b 2024-12-16T18:55:33,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741846_1031 (size=5037) 2024-12-16T18:55:33,008 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf/.tmp/info/c3f1f26cf98c451d901dee1b5dcfd1f5 2024-12-16T18:55:33,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:33,017 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf/.tmp/info/c3f1f26cf98c451d901dee1b5dcfd1f5 as hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf/info/c3f1f26cf98c451d901dee1b5dcfd1f5 2024-12-16T18:55:33,027 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf/info/c3f1f26cf98c451d901dee1b5dcfd1f5, entries=2, sequenceid=8, filesize=4.9 K 2024-12-16T18:55:33,028 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for e2b0fc1437b97a0dc1f54092591722bf in 42ms, sequenceid=8, compaction requested=false 2024-12-16T18:55:33,041 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/namespace/e2b0fc1437b97a0dc1f54092591722bf/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-16T18:55:33,041 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. 2024-12-16T18:55:33,041 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for e2b0fc1437b97a0dc1f54092591722bf: 2024-12-16T18:55:33,042 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734375287318.e2b0fc1437b97a0dc1f54092591722bf. 
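The flush that just completed follows the write-then-rename commit visible in the "Committing ... as ..." lines above: the flusher writes the new store file under the region's .tmp directory, then publishes it with a single rename so readers never observe a partially written HFile. A minimal sketch of that pattern using the plain Hadoop FileSystem API (the method, path layout, and byte payload are placeholders, not HBase's actual flush code):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class TmpCommit {
      // Write a new store file under .tmp/, then commit it with one rename.
      static Path flushAndCommit(FileSystem fs, Path regionDir, String family,
          String fileName, byte[] payload) throws IOException {
        Path tmp = new Path(regionDir, ".tmp/" + family + "/" + fileName);
        Path dst = new Path(regionDir, family + "/" + fileName);
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(payload); // stand-in for the real HFile writer
        }
        fs.mkdirs(dst.getParent());
        // A same-filesystem rename is atomic at the HDFS NameNode, which is
        // what makes this a safe publish step.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("Failed to commit " + tmp + " as " + dst);
        }
        return dst;
      }
    }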
2024-12-16T18:55:33,042 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 62192ff48e0ae8daf8449bd6a65d311e, disabling compactions & flushes
2024-12-16T18:55:33,042 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.
2024-12-16T18:55:33,042 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.
2024-12-16T18:55:33,042 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e. after waiting 0 ms
2024-12-16T18:55:33,042 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.
2024-12-16T18:55:33,042 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/.tmp/table/8228b16a0fbc4108ae00fc846eaa1e7b is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1734375288278/Put/seqid=0
2024-12-16T18:55:33,042 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 62192ff48e0ae8daf8449bd6a65d311e 1/1 column families, dataSize=4.20 KB heapSize=4.98 KB
2024-12-16T18:55:33,042 WARN [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt?
2024-12-16T18:55:33,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741847_1032 (size=5482)
2024-12-16T18:55:33,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741847_1032 (size=5482)
2024-12-16T18:55:33,052 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=244 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/.tmp/table/8228b16a0fbc4108ae00fc846eaa1e7b
2024-12-16T18:55:33,060 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/.tmp/info/6d854cdae2bc4627a28fe1bb933b1b8b as hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/info/6d854cdae2bc4627a28fe1bb933b1b8b
2024-12-16T18:55:33,067 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e/.tmp/info/2101b53b54654fa39e2a1343a65f602e is 1080, key is row1002/info:/1734375297894/Put/seqid=0
2024-12-16T18:55:33,069 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/info/6d854cdae2bc4627a28fe1bb933b1b8b, entries=20, sequenceid=16, filesize=8.1 K
2024-12-16T18:55:33,071 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/.tmp/table/8228b16a0fbc4108ae00fc846eaa1e7b as hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/table/8228b16a0fbc4108ae00fc846eaa1e7b
2024-12-16T18:55:33,080 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/table/8228b16a0fbc4108ae00fc846eaa1e7b, entries=4, sequenceid=16, filesize=5.4 K
2024-12-16T18:55:33,083 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.90 KB/2972, heapSize ~5.14 KB/5264, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=16, compaction requested=false
2024-12-16T18:55:33,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741848_1033 (size=9270)
2024-12-16T18:55:33,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741848_1033 (size=9270)
2024-12-16T18:55:33,092 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e/.tmp/info/2101b53b54654fa39e2a1343a65f602e
2024-12-16T18:55:33,102 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e/.tmp/info/2101b53b54654fa39e2a1343a65f602e as hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e/info/2101b53b54654fa39e2a1343a65f602e
2024-12-16T18:55:33,113 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e/info/2101b53b54654fa39e2a1343a65f602e, entries=4, sequenceid=12, filesize=9.1 K
2024-12-16T18:55:33,122 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1
2024-12-16T18:55:33,122 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 62192ff48e0ae8daf8449bd6a65d311e in 80ms, sequenceid=12, compaction requested=false
2024-12-16T18:55:33,123 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-16T18:55:33,123 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:55:33,123 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:55:33,124 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-16T18:55:33,131 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/data/default/TestLogRolling-testLogRollOnPipelineRestart/62192ff48e0ae8daf8449bd6a65d311e/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1
2024-12-16T18:55:33,132 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.
2024-12-16T18:55:33,132 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 62192ff48e0ae8daf8449bd6a65d311e:
2024-12-16T18:55:33,132 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1734375287880.62192ff48e0ae8daf8449bd6a65d311e.
2024-12-16T18:55:33,187 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,42217,1734375286332; all regions closed.
2024-12-16T18:55:33,187 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332
2024-12-16T18:55:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741844_1028 (size=761)
2024-12-16T18:55:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741844_1028 (size=761)
2024-12-16T18:55:33,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:33,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:33,828 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-16T18:55:33,828 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-16T18:55:34,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:34,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:34,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:34,840 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:55:35,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:35,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:35,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:36,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:36,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:36,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta after 68059ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:55:36,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:36,450 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1017: GenerationStamp not matched, existing replica is blk_1073741834_1010
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-16T18:55:36,951 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta after 4001ms
2024-12-16T18:55:36,951 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta to hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/oldWALs/7a6e4bf70377%2C42217%2C1734375286332.meta.1734375287221.meta
2024-12-16T18:55:36,954 DEBUG [RS:0;7a6e4bf70377:42217 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/oldWALs
2024-12-16T18:55:36,954 INFO [RS:0;7a6e4bf70377:42217 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C42217%2C1734375286332.meta:.meta(num 1734375332942)
2024-12-16T18:55:36,955 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/WALs/7a6e4bf70377,42217,1734375286332
2024-12-16T18:55:36,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741843_1027 (size=1979)
2024-12-16T18:55:36,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741843_1027 (size=1979)
2024-12-16T18:55:36,961 DEBUG [RS:0;7a6e4bf70377:42217 {}] wal.AbstractFSWAL(1071): Moved 4 WAL file(s) to /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/oldWALs
2024-12-16T18:55:36,961 INFO [RS:0;7a6e4bf70377:42217 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C42217%2C1734375286332:(num 1734375332935)
2024-12-16T18:55:36,961 DEBUG [RS:0;7a6e4bf70377:42217 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:55:36,961 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:55:36,961 INFO [RS:0;7a6e4bf70377:42217 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-16T18:55:36,961 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:55:36,961 INFO [RS:0;7a6e4bf70377:42217 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42217
2024-12-16T18:55:37,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:37,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:55:37,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,42217,1734375286332
2024-12-16T18:55:37,064 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,42217,1734375286332]
2024-12-16T18:55:37,065 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,42217,1734375286332; numProcessing=1
2024-12-16T18:55:37,106 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,42217,1734375286332 already deleted, retry=false
2024-12-16T18:55:37,106 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,42217,1734375286332 expired; onlineServers=0
2024-12-16T18:55:37,106 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,36945,1734375286183' *****
2024-12-16T18:55:37,106 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-16T18:55:37,106 DEBUG [M:0;7a6e4bf70377:36945 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48f87d89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:55:37,107 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,36945,1734375286183
2024-12-16T18:55:37,107 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,36945,1734375286183; all regions closed.
2024-12-16T18:55:37,107 DEBUG [M:0;7a6e4bf70377:36945 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:55:37,107 DEBUG [M:0;7a6e4bf70377:36945 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-16T18:55:37,107 DEBUG [M:0;7a6e4bf70377:36945 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-16T18:55:37,107 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-16T18:55:37,107 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375286758 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375286758,5,FailOnTimeoutGroup]
2024-12-16T18:55:37,107 INFO [M:0;7a6e4bf70377:36945 {}] hbase.ChoreService(370): Chore service for: master/7a6e4bf70377:0 had [] on shutdown
2024-12-16T18:55:37,107 DEBUG [M:0;7a6e4bf70377:36945 {}] master.HMaster(1733): Stopping service threads
2024-12-16T18:55:37,107 INFO [M:0;7a6e4bf70377:36945 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-16T18:55:37,107 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375286774 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375286774,5,FailOnTimeoutGroup]
2024-12-16T18:55:37,107 INFO [M:0;7a6e4bf70377:36945 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-16T18:55:37,108 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-16T18:55:37,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-16T18:55:37,118 DEBUG [M:0;7a6e4bf70377:36945 {}] zookeeper.ZKUtil(347): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-16T18:55:37,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:37,118 WARN [M:0;7a6e4bf70377:36945 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-16T18:55:37,118 INFO [M:0;7a6e4bf70377:36945 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-16T18:55:37,118 INFO [M:0;7a6e4bf70377:36945 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-16T18:55:37,118 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:55:37,118 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:55:37,118 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:55:37,118 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:55:37,118 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:55:37,118 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.10 KB heapSize=49.26 KB
2024-12-16T18:55:37,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:55:37,133 DEBUG [M:0;7a6e4bf70377:36945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d6a32b3bc084618a9954158d13ab244 is 82, key is hbase:meta,,1/info:regioninfo/1734375287251/Put/seqid=0
2024-12-16T18:55:37,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741849_1034 (size=5672)
2024-12-16T18:55:37,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741849_1034 (size=5672)
2024-12-16T18:55:37,139 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d6a32b3bc084618a9954158d13ab244
2024-12-16T18:55:37,162 DEBUG [M:0;7a6e4bf70377:36945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cacbec938c0e4d488431934b4f3447aa is 779, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734375288324/Put/seqid=0
2024-12-16T18:55:37,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:55:37,165 INFO [RS:0;7a6e4bf70377:42217 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,42217,1734375286332; zookeeper connection closed.
2024-12-16T18:55:37,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42217-0x100e454a4490001, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:55:37,165 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3e3c6c99 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3e3c6c99
2024-12-16T18:55:37,165 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-16T18:55:37,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741850_1035 (size=7470)
2024-12-16T18:55:37,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741850_1035 (size=7470)
2024-12-16T18:55:37,168 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.50 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cacbec938c0e4d488431934b4f3447aa
2024-12-16T18:55:37,193 DEBUG [M:0;7a6e4bf70377:36945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c1e10d08a3a349e685fd350632c73e55 is 69, key is 7a6e4bf70377,42217,1734375286332/rs:state/1734375286811/Put/seqid=0
2024-12-16T18:55:37,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741851_1036 (size=5156)
2024-12-16T18:55:37,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741851_1036 (size=5156)
2024-12-16T18:55:37,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:37,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:37,600 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c1e10d08a3a349e685fd350632c73e55
2024-12-16T18:55:37,628 DEBUG [M:0;7a6e4bf70377:36945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fad288d8b93f45dc9e6160f0900624ae is 52, key is load_balancer_on/state:d/1734375287874/Put/seqid=0
2024-12-16T18:55:37,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741852_1037 (size=5056)
2024-12-16T18:55:37,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741852_1037 (size=5056)
2024-12-16T18:55:37,636 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fad288d8b93f45dc9e6160f0900624ae
2024-12-16T18:55:37,647 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d6a32b3bc084618a9954158d13ab244 as hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8d6a32b3bc084618a9954158d13ab244
2024-12-16T18:55:37,654 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8d6a32b3bc084618a9954158d13ab244, entries=8, sequenceid=96, filesize=5.5 K
2024-12-16T18:55:37,659 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cacbec938c0e4d488431934b4f3447aa as hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cacbec938c0e4d488431934b4f3447aa
2024-12-16T18:55:37,666 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cacbec938c0e4d488431934b4f3447aa, entries=11, sequenceid=96, filesize=7.3 K
2024-12-16T18:55:37,667 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c1e10d08a3a349e685fd350632c73e55 as hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c1e10d08a3a349e685fd350632c73e55
2024-12-16T18:55:37,673 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c1e10d08a3a349e685fd350632c73e55, entries=1, sequenceid=96, filesize=5.0 K
2024-12-16T18:55:37,675 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fad288d8b93f45dc9e6160f0900624ae as hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fad288d8b93f45dc9e6160f0900624ae
2024-12-16T18:55:37,681 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35803/user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fad288d8b93f45dc9e6160f0900624ae, entries=1, sequenceid=96, filesize=4.9 K
2024-12-16T18:55:37,682 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.10 KB/41064, heapSize ~49.20 KB/50376, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 564ms, sequenceid=96, compaction requested=false
2024-12-16T18:55:37,686 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:55:37,686 DEBUG [M:0;7a6e4bf70377:36945 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:55:37,690 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/a7e45959-c638-bcde-1278-a4fc77470c89/MasterData/WALs/7a6e4bf70377,36945,1734375286183
2024-12-16T18:55:37,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741840_1021 (size=757)
2024-12-16T18:55:37,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41991 is added to blk_1073741840_1021 (size=757)
2024-12-16T18:55:37,693 INFO [M:0;7a6e4bf70377:36945 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-16T18:55:37,693 INFO [M:0;7a6e4bf70377:36945 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36945
2024-12-16T18:55:37,693 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:55:37,714 DEBUG [M:0;7a6e4bf70377:36945 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7a6e4bf70377,36945,1734375286183 already deleted, retry=false
2024-12-16T18:55:37,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:55:37,865 INFO [M:0;7a6e4bf70377:36945 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,36945,1734375286183; zookeeper connection closed.
2024-12-16T18:55:37,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x100e454a4490000, quorum=127.0.0.1:63223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:55:37,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@355d3e72{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:37,927 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@280885c6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:55:37,927 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:55:37,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2241922f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:55:37,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@879f137{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,STOPPED}
2024-12-16T18:55:37,936 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:55:37,936 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-23588281-172.17.0.2-1734375284143 (Datanode Uuid 6fa5ce46-9b4e-48af-8e8f-22651392c095) service to localhost/127.0.0.1:35803
2024-12-16T18:55:37,936 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:55:37,936 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:55:37,937 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data3/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:37,937 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data4/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:37,937 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:55:37,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d23c9d7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:37,944 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f5ef35{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:55:37,944 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:55:37,944 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29c5d5b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:55:37,944 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b7f9e35{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,STOPPED}
2024-12-16T18:55:37,947 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:55:37,947 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:55:37,947 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:55:37,947 WARN [BP-23588281-172.17.0.2-1734375284143 heartbeating to localhost/127.0.0.1:35803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-23588281-172.17.0.2-1734375284143 (Datanode Uuid 75933a06-b540-4670-be91-29ae3cce9c45) service to localhost/127.0.0.1:35803
2024-12-16T18:55:37,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data1/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:37,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/cluster_c01e9cf4-5336-d548-afd4-32ca93d02ab5/dfs/data/data2/current/BP-23588281-172.17.0.2-1734375284143 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:55:37,948 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:55:37,957 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@377bc93b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T18:55:37,961 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24cd7079{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:55:37,962 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:55:37,962 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b87ebe3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:55:37,962 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@796327b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir/,STOPPED}
2024-12-16T18:55:37,971 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-16T18:55:37,998 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-16T18:55:38,009 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=99 (was 86)

Potentially hanging thread: nioEventLoopGroup-28-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:35803 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-26-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-9-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-26-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-9-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35803
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-29-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-9-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:35803
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:35803 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-29-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35803
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-27-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-28-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-8-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-28-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-27-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-29-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-8-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35803
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-27-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-26-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (679588824) connection to localhost/127.0.0.1:35803 from jenkins.hfs.3
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:35803
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-8-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=448 (was 410) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=327 (was 256) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4748 (was 4964)
2024-12-16T18:55:38,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:38,025 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=99, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=327, ProcessCount=11, AvailableMemoryMB=4744
2024-12-16T18:55:38,025 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.log.dir so I do NOT create it in target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/968cfce6-f9d9-51ee-149d-427615bcf174/hadoop.tmp.dir so I do NOT create it in target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c, deleteOnExit=true
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/test.cache.data in system properties and HBase conf
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.tmp.dir in system properties and HBase conf
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.log.dir in system properties and HBase conf
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-16T18:55:38,026 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-16T18:55:38,027 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-16T18:55:38,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:55:38,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:55:38,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-16T18:55:38,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:55:38,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-16T18:55:38,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-16T18:55:38,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:55:38,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:55:38,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-16T18:55:38,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/nfs.dump.dir in system properties and HBase conf
2024-12-16T18:55:38,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/java.io.tmpdir in system properties and HBase conf
2024-12-16T18:55:38,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:55:38,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-16T18:55:38,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-16T18:55:38,045 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-16T18:55:38,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:55:38,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:38,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:38,581 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-16T18:55:38,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:38,628 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:55:38,632 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T18:55:38,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T18:55:38,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T18:55:38,634 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T18:55:38,634 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:55:38,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@787f6765{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.log.dir/,AVAILABLE} 2024-12-16T18:55:38,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8dbcd51{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T18:55:38,728 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6843989b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/java.io.tmpdir/jetty-localhost-46821-hadoop-hdfs-3_4_1-tests_jar-_-any-7665086418261732505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-16T18:55:38,728 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20f985a7{HTTP/1.1, (http/1.1)}{localhost:46821} 2024-12-16T18:55:38,728 INFO [Time-limited test {}] server.Server(415): Started @243095ms 2024-12-16T18:55:38,741 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-16T18:55:39,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:39,109 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:55:39,112 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:55:39,112 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:55:39,113 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:55:39,113 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:55:39,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48a21b91{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:55:39,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e333cd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:55:39,207 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@102c13ed{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/java.io.tmpdir/jetty-localhost-40719-hadoop-hdfs-3_4_1-tests_jar-_-any-13308336831310125374/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:39,208 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1400588a{HTTP/1.1, (http/1.1)}{localhost:40719}
2024-12-16T18:55:39,208 INFO [Time-limited test {}] server.Server(415): Started @243574ms
2024-12-16T18:55:39,209 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:55:39,247 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:55:39,250 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:55:39,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:55:39,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:55:39,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T18:55:39,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@581d3af9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:55:39,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31c93d20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:55:39,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:39,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@206aa743{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/java.io.tmpdir/jetty-localhost-45221-hadoop-hdfs-3_4_1-tests_jar-_-any-2177943847485003182/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:55:39,347 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5279ded4{HTTP/1.1, (http/1.1)}{localhost:45221}
2024-12-16T18:55:39,347 INFO [Time-limited test {}] server.Server(415): Started @243714ms
2024-12-16T18:55:39,349 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:55:39,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:40,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:40,094 WARN [Thread-1414 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/dfs/data/data1/current/BP-304968768-172.17.0.2-1734375338081/current, will proceed with Du for space computation calculation,
2024-12-16T18:55:40,098 WARN [Thread-1415 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/dfs/data/data2/current/BP-304968768-172.17.0.2-1734375338081/current, will proceed with Du for space computation calculation,
2024-12-16T18:55:40,119 WARN [Thread-1378 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:55:40,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3618e38daeab9633 with lease ID 0xfff4f6b5ea2246b8: Processing first storage report for DS-8e9a8248-ee6d-44ce-b228-990a77cedab1 from datanode DatanodeRegistration(127.0.0.1:35711, datanodeUuid=370b6431-714e-4987-a944-9f1f7ed2f740, infoPort=44177, infoSecurePort=0, ipcPort=45829, storageInfo=lv=-57;cid=testClusterID;nsid=508510089;c=1734375338081)
2024-12-16T18:55:40,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3618e38daeab9633 with lease ID 0xfff4f6b5ea2246b8: from storage DS-8e9a8248-ee6d-44ce-b228-990a77cedab1 node DatanodeRegistration(127.0.0.1:35711, datanodeUuid=370b6431-714e-4987-a944-9f1f7ed2f740, infoPort=44177, infoSecurePort=0, ipcPort=45829, storageInfo=lv=-57;cid=testClusterID;nsid=508510089;c=1734375338081), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:40,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3618e38daeab9633 with lease ID 0xfff4f6b5ea2246b8: Processing first storage report for DS-345745f6-d791-474e-b505-89fb389d7396 from datanode DatanodeRegistration(127.0.0.1:35711, datanodeUuid=370b6431-714e-4987-a944-9f1f7ed2f740, infoPort=44177, infoSecurePort=0, ipcPort=45829, storageInfo=lv=-57;cid=testClusterID;nsid=508510089;c=1734375338081)
2024-12-16T18:55:40,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3618e38daeab9633 with lease ID 0xfff4f6b5ea2246b8: from storage DS-345745f6-d791-474e-b505-89fb389d7396 node DatanodeRegistration(127.0.0.1:35711, datanodeUuid=370b6431-714e-4987-a944-9f1f7ed2f740, infoPort=44177, infoSecurePort=0, ipcPort=45829, storageInfo=lv=-57;cid=testClusterID;nsid=508510089;c=1734375338081), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:40,216 WARN [Thread-1426 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/dfs/data/data4/current/BP-304968768-172.17.0.2-1734375338081/current, will proceed with Du for space computation calculation,
2024-12-16T18:55:40,216 WARN [Thread-1425 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/dfs/data/data3/current/BP-304968768-172.17.0.2-1734375338081/current, will proceed with Du for space computation calculation,
2024-12-16T18:55:40,231 WARN [Thread-1401 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:55:40,233 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe65eb5d9bd496ce8 with lease ID 0xfff4f6b5ea2246b9: Processing first storage report for DS-14842dd7-6937-49a1-9c88-c2dc55a25bcd from datanode DatanodeRegistration(127.0.0.1:36025, datanodeUuid=dff0078f-140f-4c23-ac07-60e2185a30e9, infoPort=43743, infoSecurePort=0, ipcPort=38751, storageInfo=lv=-57;cid=testClusterID;nsid=508510089;c=1734375338081)
2024-12-16T18:55:40,233 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe65eb5d9bd496ce8 with lease ID 0xfff4f6b5ea2246b9: from storage DS-14842dd7-6937-49a1-9c88-c2dc55a25bcd node DatanodeRegistration(127.0.0.1:36025, datanodeUuid=dff0078f-140f-4c23-ac07-60e2185a30e9, infoPort=43743, infoSecurePort=0, ipcPort=38751, storageInfo=lv=-57;cid=testClusterID;nsid=508510089;c=1734375338081), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:55:40,233 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe65eb5d9bd496ce8 with lease ID 0xfff4f6b5ea2246b9: Processing first storage report for DS-70041ce1-181c-4686-a339-47b8792585b4 from datanode DatanodeRegistration(127.0.0.1:36025, datanodeUuid=dff0078f-140f-4c23-ac07-60e2185a30e9, infoPort=43743, infoSecurePort=0, ipcPort=38751, storageInfo=lv=-57;cid=testClusterID;nsid=508510089;c=1734375338081)
2024-12-16T18:55:40,234 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe65eb5d9bd496ce8 with lease ID 0xfff4f6b5ea2246b9: from storage DS-70041ce1-181c-4686-a339-47b8792585b4 node DatanodeRegistration(127.0.0.1:36025, datanodeUuid=dff0078f-140f-4c23-ac07-60e2185a30e9, infoPort=43743, infoSecurePort=0, ipcPort=38751, storageInfo=lv=-57;cid=testClusterID;nsid=508510089;c=1734375338081), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-16T18:55:40,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d
2024-12-16T18:55:40,294 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/zookeeper_0, clientPort=55775, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-16T18:55:40,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:40,295 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55775
2024-12-16T18:55:40,295 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:55:40,297 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:55:40,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:55:40,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:55:40,315 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d with version=8
2024-12-16T18:55:40,315 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/hbase-staging
2024-12-16T18:55:40,318 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:55:40,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:55:40,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:55:40,319 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:55:40,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:55:40,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:55:40,319 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:55:40,319 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:55:40,320 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42913
2024-12-16T18:55:40,320 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:55:40,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:55:40,326 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:42913 connecting to ZooKeeper ensemble=127.0.0.1:55775
2024-12-16T18:55:40,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:40,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429130x0, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:55:40,373 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42913-0x100e45577bf0000 connected
2024-12-16T18:55:40,448 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:55:40,448 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:55:40,449 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:55:40,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42913
2024-12-16T18:55:40,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42913
2024-12-16T18:55:40,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42913
2024-12-16T18:55:40,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42913
2024-12-16T18:55:40,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42913
2024-12-16T18:55:40,450 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d, hbase.cluster.distributed=false
2024-12-16T18:55:40,472 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:55:40,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:55:40,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:55:40,472 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:55:40,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:55:40,473 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:55:40,473 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:55:40,473 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:55:40,474 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44607
2024-12-16T18:55:40,474 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T18:55:40,475 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-16T18:55:40,476 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:55:40,479 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:55:40,483 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44607 connecting to ZooKeeper ensemble=127.0.0.1:55775
2024-12-16T18:55:40,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446070x0, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:55:40,489 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446070x0, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:55:40,489 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44607-0x100e45577bf0001 connected
2024-12-16T18:55:40,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:55:40,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:55:40,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44607
2024-12-16T18:55:40,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44607
2024-12-16T18:55:40,494 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44607
2024-12-16T18:55:40,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44607
2024-12-16T18:55:40,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44607
2024-12-16T18:55:40,500 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7a6e4bf70377,42913,1734375340318
2024-12-16T18:55:40,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:55:40,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:55:40,506 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a6e4bf70377,42913,1734375340318
2024-12-16T18:55:40,511 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a6e4bf70377:42913
2024-12-16T18:55:40,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:55:40,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:55:40,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,514 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:55:40,514 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a6e4bf70377,42913,1734375340318 from backup master directory
2024-12-16T18:55:40,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a6e4bf70377,42913,1734375340318
2024-12-16T18:55:40,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:55:40,522 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-16T18:55:40,522 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:55:40,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:55:40,522 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a6e4bf70377,42913,1734375340318
2024-12-16T18:55:40,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:55:40,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:55:40,540 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/hbase.id with ID: 01f2de68-58d4-4f0f-a47e-4ab35337667b
2024-12-16T18:55:40,551 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:55:40,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:55:40,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:55:40,582 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-16T18:55:40,583 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-16T18:55:40,583 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:55:40,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:55:40,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:55:40,596 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store
2024-12-16T18:55:40,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:55:40,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:55:40,605 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:55:40,605 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:55:40,605 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:55:40,605 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:55:40,605 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:55:40,605 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:55:40,605 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:55:40,605 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:55:40,606 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/.initializing
2024-12-16T18:55:40,606 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/WALs/7a6e4bf70377,42913,1734375340318
2024-12-16T18:55:40,609 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C42913%2C1734375340318, suffix=, logDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/WALs/7a6e4bf70377,42913,1734375340318, archiveDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/oldWALs, maxLogs=10
2024-12-16T18:55:40,609 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42913%2C1734375340318.1734375340609
2024-12-16T18:55:40,620 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/WALs/7a6e4bf70377,42913,1734375340318/7a6e4bf70377%2C42913%2C1734375340318.1734375340609
2024-12-16T18:55:40,620 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44177:44177),(127.0.0.1/127.0.0.1:43743:43743)]
2024-12-16T18:55:40,620 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:55:40,620 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:55:40,621 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,621 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-16T18:55:40,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:40,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:55:40,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-16T18:55:40,630 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:40,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:55:40,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-16T18:55:40,631 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:40,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:55:40,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-16T18:55:40,633 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:40,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:55:40,634 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,635 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,637 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-16T18:55:40,638 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:55:40,641 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:55:40,641 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694101, jitterRate=-0.11740611493587494}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-16T18:55:40,642 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:55:40,644 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-16T18:55:40,648 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39b8b8a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:55:40,648 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-16T18:55:40,649 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-16T18:55:40,649 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-16T18:55:40,649 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-16T18:55:40,649 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-16T18:55:40,650 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-16T18:55:40,650 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-16T18:55:40,653 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-16T18:55:40,654 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-16T18:55:40,664 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-16T18:55:40,664 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-16T18:55:40,664 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-16T18:55:40,678 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-16T18:55:40,678 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-16T18:55:40,679 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-16T18:55:40,689 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-16T18:55:40,689 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-16T18:55:40,703 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-16T18:55:40,704 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-16T18:55:40,714 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-16T18:55:40,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:55:40,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:55:40,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,724 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7a6e4bf70377,42913,1734375340318, sessionid=0x100e45577bf0000, setting cluster-up flag (Was=false)
2024-12-16T18:55:40,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,772 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-16T18:55:40,773 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,42913,1734375340318
2024-12-16T18:55:40,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:40,814 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-16T18:55:40,815 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,42913,1734375340318
2024-12-16T18:55:40,818 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-16T18:55:40,818 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-16T18:55:40,818 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction], sum of multiplier of cost functions = 0.0 etc.
2024-12-16T18:55:40,819 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a6e4bf70377,42913,1734375340318 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-16T18:55:40,819 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:55:40,819 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:55:40,819 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:55:40,819 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:55:40,819 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a6e4bf70377:0, corePoolSize=10, maxPoolSize=10
2024-12-16T18:55:40,819 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,819 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:55:40,820 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,821 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734375370821
2024-12-16T18:55:40,821 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-16T18:55:40,821 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-16T18:55:40,821 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-16T18:55:40,821 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-16T18:55:40,821 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-16T18:55:40,821 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-16T18:55:40,821 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,821 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:55:40,821 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-16T18:55:40,822 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-16T18:55:40,822 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-16T18:55:40,822 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-16T18:55:40,822 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-16T18:55:40,822 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-16T18:55:40,823 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:40,823 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:55:40,826 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375340822,5,FailOnTimeoutGroup]
2024-12-16T18:55:40,832 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375340826,5,FailOnTimeoutGroup]
2024-12-16T18:55:40,832 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,832 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-16T18:55:40,832 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,832 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:55:40,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:55:40,839 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-16T18:55:40,839 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d
2024-12-16T18:55:40,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:55:40,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741832_1008 (size=32)
2024-12-16T18:55:40,855 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:55:40,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:55:40,863 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:55:40,864 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:40,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:55:40,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:55:40,866 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:55:40,866 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:40,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:55:40,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:55:40,868 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:55:40,869 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:40,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:55:40,871 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740
2024-12-16T18:55:40,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740
2024-12-16T18:55:40,873 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor; using region.getMemStoreFlushHeapSize/# of families (16.0 M) instead.
2024-12-16T18:55:40,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:55:40,877 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:55:40,877 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=867917, jitterRate=0.10361461341381073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:55:40,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:55:40,878 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:55:40,878 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:55:40,878 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:55:40,878 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:55:40,878 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:55:40,878 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:55:40,878 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:55:40,880 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:55:40,880 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-16T18:55:40,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-16T18:55:40,882 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-16T18:55:40,886 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-16T18:55:40,916 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a6e4bf70377:44607
2024-12-16T18:55:40,917 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1008): ClusterId : 01f2de68-58d4-4f0f-a47e-4ab35337667b
2024-12-16T18:55:40,917 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-16T18:55:40,923 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-16T18:55:40,923 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-16T18:55:40,931 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-16T18:55:40,931 DEBUG [RS:0;7a6e4bf70377:44607 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@765ae348, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:55:40,932 DEBUG [RS:0;7a6e4bf70377:44607 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d8c0901, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:55:40,932 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-16T18:55:40,932 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-16T18:55:40,932 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1090): About to register with Master.
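[Editor's note] The ZKUtil "Unable to get data of znode ... (not necessarily an error)" lines above are the master probing optional znodes (/hbase/balancer, /hbase/normalizer, the split/merge switches) whose absence on first startup is expected. A minimal sketch of that benign-miss pattern using the plain ZooKeeper client (not HBase's ZKUtil wrapper); the quorum address is copied from the log and would need to point at a live ensemble:

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeProbe {
    public static void main(String[] args) throws Exception {
        // Quorum taken from the log lines above; adjust for a real cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55775", 30000, (WatchedEvent e) -> {});
        try {
            // Reading an optional znode: a miss is normal on a fresh cluster.
            byte[] data = zk.getData("/hbase/balancer", false, null);
            System.out.println("balancer znode: " + data.length + " bytes");
        } catch (KeeperException.NoNodeException expected) {
            // Mirrors the DEBUG message: absence is "not necessarily an error".
            System.out.println("/hbase/balancer does not exist; falling back to defaults");
        } finally {
            zk.close();
        }
    }
}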
2024-12-16T18:55:40,932 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,42913,1734375340318 with isa=7a6e4bf70377/172.17.0.2:44607, startcode=1734375340471
2024-12-16T18:55:40,933 DEBUG [RS:0;7a6e4bf70377:44607 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:55:40,936 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35577, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:55:40,937 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42913 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:40,937 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42913 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:40,938 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d
2024-12-16T18:55:40,938 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:43947
2024-12-16T18:55:40,939 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-16T18:55:40,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:55:40,947 DEBUG [RS:0;7a6e4bf70377:44607 {}] zookeeper.ZKUtil(111): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:40,947 WARN [RS:0;7a6e4bf70377:44607 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
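[Editor's note] Every entry in this log follows one fixed shape: ISO-8601 timestamp with comma-separated milliseconds, level, a bracketed thread name plus MDC braces, then "class(line): message". A small hypothetical parser for that shape (the LogLine record and regex are mine, not part of HBase; it only matches the common empty "{}" MDC form, so stack-trace continuations and "{event_type=...}" entries return null):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LogLineParser {
    // Hypothetical record mirroring the fields visible in each entry above.
    record LogLine(String timestamp, String level, String thread, String source, String message) {}

    private static final Pattern ENTRY = Pattern.compile(
        "^(\\S+) (TRACE|DEBUG|INFO|WARN|ERROR) \\[(.+?) \\{\\}\\] (\\S+?): (.*)$");

    static LogLine parse(String line) {
        Matcher m = ENTRY.matcher(line);
        if (!m.matches()) return null; // continuation lines, non-empty MDC, etc.
        return new LogLine(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5));
    }

    public static void main(String[] args) {
        String sample = "2024-12-16T18:55:40,932 INFO [RS:0;7a6e4bf70377:44607 {}] "
            + "regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,42913,1734375340318";
        System.out.println(parse(sample));
    }
}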
2024-12-16T18:55:40,948 INFO [RS:0;7a6e4bf70377:44607 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:55:40,948 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:40,949 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,44607,1734375340471]
2024-12-16T18:55:40,952 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-16T18:55:40,953 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-16T18:55:40,956 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-16T18:55:40,956 INFO [RS:0;7a6e4bf70377:44607 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-16T18:55:40,956 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,956 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-16T18:55:40,957 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
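[Editor's note] The MemStoreFlusher limits above are consistent with the usual heap-fraction arithmetic: with an assumed ~2200 MB max heap, a global memstore fraction of 0.4 gives the logged 880 M, and a 0.95 lower-limit fraction gives the 836 M low-water mark. The fractions correspond to hbase.regionserver.global.memstore.size and hbase.regionserver.global.memstore.size.lower.limit (defaults assumed, not confirmed by this log). A back-of-the-envelope check:

public class MemStoreLimits {
    public static void main(String[] args) {
        // Assumed defaults: global memstore fraction 0.4, lower-limit fraction 0.95.
        long maxHeapMb = 2200;                 // implied by the logged 880 M global limit
        double globalFraction = 0.4;
        double lowerLimitFraction = 0.95;
        long globalLimit = (long) (maxHeapMb * globalFraction);   // 880 M, as logged
        long lowMark = (long) (globalLimit * lowerLimitFraction); // 836 M, as logged
        System.out.printf("globalMemStoreLimit=%d M, lowMark=%d M%n", globalLimit, lowMark);
    }
}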
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:55:40,958 DEBUG [RS:0;7a6e4bf70377:44607 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:55:40,959 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,959 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,959 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,959 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,959 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,44607,1734375340471-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
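[Editor's note] Each executor.ExecutorService(95) line above registers a named thread pool with a core and max size; the semantics are those of a standard JDK ThreadPoolExecutor. A rough standalone equivalent (plain JDK, not HBase's ExecutorService wrapper; the pool name and sizes are copied from the RS_OPEN_REGION line):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class NamedPoolSketch {
    public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1, as logged for RS_OPEN_REGION.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            r -> new Thread(r, "RS_OPEN_REGION-regionserver/7a6e4bf70377:0"));
        pool.submit(() ->
            System.out.println(Thread.currentThread().getName() + " handling open-region event"));
        pool.shutdown();
    }
}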
2024-12-16T18:55:40,976 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-16T18:55:40,976 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,44607,1734375340471-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:40,991 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.Replication(204): 7a6e4bf70377,44607,1734375340471 started
2024-12-16T18:55:40,991 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,44607,1734375340471, RpcServer on 7a6e4bf70377/172.17.0.2:44607, sessionid=0x100e45577bf0001
2024-12-16T18:55:40,991 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-16T18:55:40,991 DEBUG [RS:0;7a6e4bf70377:44607 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:40,991 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,44607,1734375340471'
2024-12-16T18:55:40,991 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-16T18:55:40,992 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-16T18:55:40,992 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-16T18:55:40,992 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-16T18:55:40,992 DEBUG [RS:0;7a6e4bf70377:44607 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:40,992 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,44607,1734375340471'
2024-12-16T18:55:40,992 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-16T18:55:40,992 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-16T18:55:40,992 DEBUG [RS:0;7a6e4bf70377:44607 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-16T18:55:40,993 INFO [RS:0;7a6e4bf70377:44607 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-16T18:55:40,993 INFO [RS:0;7a6e4bf70377:44607 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-16T18:55:41,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:41,036 WARN [7a6e4bf70377:42913 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-16T18:55:41,095 INFO [RS:0;7a6e4bf70377:44607 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C44607%2C1734375340471, suffix=, logDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471, archiveDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/oldWALs, maxLogs=32
2024-12-16T18:55:41,096 INFO [RS:0;7a6e4bf70377:44607 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C44607%2C1734375340471.1734375341096
2024-12-16T18:55:41,106 INFO [RS:0;7a6e4bf70377:44607 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375341096
2024-12-16T18:55:41,106 DEBUG [RS:0;7a6e4bf70377:44607 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43743:43743),(127.0.0.1/127.0.0.1:44177:44177)]
2024-12-16T18:55:41,286 DEBUG [7a6e4bf70377:42913 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-16T18:55:41,287 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:41,288 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,44607,1734375340471, state=OPENING
2024-12-16T18:55:41,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:41,297 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-16T18:55:41,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:41,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:41,306 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:55:41,306 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:55:41,306 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7a6e4bf70377,44607,1734375340471}]
2024-12-16T18:55:41,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:41,459 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:41,459 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-16T18:55:41,464 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-16T18:55:41,468 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-16T18:55:41,468 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:55:41,470 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C44607%2C1734375340471.meta, suffix=.meta, logDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471, archiveDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/oldWALs, maxLogs=32
2024-12-16T18:55:41,471 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C44607%2C1734375340471.meta.1734375341470.meta
2024-12-16T18:55:41,485 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.meta.1734375341470.meta
2024-12-16T18:55:41,485 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43743:43743),(127.0.0.1/127.0.0.1:44177:44177)]
2024-12-16T18:55:41,485 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:55:41,486 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-16T18:55:41,486 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-16T18:55:41,486 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
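[Editor's note] The three RecoverLeaseFSUtils warnings above all show the same pattern: a reflective call to isFileClosed failing with InvocationTargetException because the underlying DFSClient was already closed (these WALs belong to an earlier mini-cluster at localhost:35231 being torn down), which the WAL close path treats as a recoverable probe failure rather than a fatal error. A simplified sketch of that reflection pattern; DistributedFileSystem.isFileClosed(Path) is a real hadoop-hdfs-client method, but the condensed error handling here is my own:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
    /** Returns true only if the filesystem supports isFileClosed and reports the file closed. */
    static boolean isFileClosed(FileSystem fs, Path p) {
        try {
            // Looked up reflectively so the code also runs against FileSystem
            // implementations that lack the method (as RecoverLeaseFSUtils does).
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException e) {
            return false; // not an HDFS client; caller falls back to lease-recovery retries
        } catch (InvocationTargetException e) {
            // e.getCause() is e.g. "java.io.IOException: Filesystem closed", as logged:
            // treat the probe as inconclusive rather than fatal.
            return false;
        } catch (IllegalAccessException e) {
            return false;
        }
    }

    public static void main(String[] args) throws Exception {
        FileSystem local = FileSystem.getLocal(new Configuration());
        // LocalFileSystem has no isFileClosed, so this prints false.
        System.out.println(isFileClosed(local, new Path("/tmp/example")));
    }
}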
2024-12-16T18:55:41,486 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-16T18:55:41,486 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:55:41,486 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-16T18:55:41,486 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-16T18:55:41,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:55:41,489 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:55:41,489 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:41,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:55:41,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:55:41,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:55:41,491 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:41,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:55:41,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:55:41,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:55:41,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:41,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:55:41,495 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740
2024-12-16T18:55:41,503 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740
2024-12-16T18:55:41,505 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor; using region.getMemStoreFlushHeapSize/# of families (16.0 M) instead.
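[Editor's note] The column-family attributes repeatedly printed for hbase:meta (info with ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions) map directly onto the HBase 2.x descriptor builder API. A minimal sketch using that public API; note the real meta descriptor is built internally by FSTableDescriptors, and the "demo:meta_like" table name here is a hypothetical stand-in:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static void main(String[] args) {
        // Mirrors the 'info' column family attributes printed in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "meta_like")) // not hbase:meta itself
            .setColumnFamily(info)
            .build();
        System.out.println(td);
    }
}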
2024-12-16T18:55:41,506 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:55:41,507 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697543, jitterRate=-0.11302852630615234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:55:41,508 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:55:41,508 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734375341459
2024-12-16T18:55:41,512 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-16T18:55:41,512 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-16T18:55:41,513 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:41,518 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,44607,1734375340471, state=OPEN
2024-12-16T18:55:41,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:55:41,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:55:41,548 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:55:41,548 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:55:41,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-16T18:55:41,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7a6e4bf70377,44607,1734375340471 in 242 msec
2024-12-16T18:55:41,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-16T18:55:41,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 671 msec
2024-12-16T18:55:41,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 737 msec
2024-12-16T18:55:41,555 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734375341554, completionTime=-1
2024-12-16T18:55:41,555 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-16T18:55:41,555 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-16T18:55:41,556 DEBUG [hconnection-0x68c1be64-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:55:41,558 INFO [RS-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:55:41,562 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-16T18:55:41,562 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734375401562
2024-12-16T18:55:41,562 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734375461562
2024-12-16T18:55:41,562 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 7 msec
2024-12-16T18:55:41,589 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42913,1734375340318-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:41,589 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42913,1734375340318-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:41,589 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42913,1734375340318-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:41,590 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a6e4bf70377:42913, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:41,590 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:41,590 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-16T18:55:41,590 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:55:41,591 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-16T18:55:41,592 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-16T18:55:41,593 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-16T18:55:41,593 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:41,594 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-16T18:55:41,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:55:41,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:55:41,604 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9a14bb9d2ec262933cef92c96a4c2892, NAME => 'hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d
2024-12-16T18:55:41,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:55:41,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:55:41,613 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:55:41,613 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 9a14bb9d2ec262933cef92c96a4c2892, disabling compactions & flushes
2024-12-16T18:55:41,613 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:41,613 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:41,613 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892. after waiting 0 ms
2024-12-16T18:55:41,613 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:41,613 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:41,613 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9a14bb9d2ec262933cef92c96a4c2892:
2024-12-16T18:55:41,614 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-16T18:55:41,615 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734375341614"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375341614"}]},"ts":"1734375341614"}
2024-12-16T18:55:41,618 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-16T18:55:41,619 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-16T18:55:41,620 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375341619"}]},"ts":"1734375341619"}
2024-12-16T18:55:41,622 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-16T18:55:41,639 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=9a14bb9d2ec262933cef92c96a4c2892, ASSIGN}]
2024-12-16T18:55:41,640 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=9a14bb9d2ec262933cef92c96a4c2892, ASSIGN
2024-12-16T18:55:41,641 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=9a14bb9d2ec262933cef92c96a4c2892, ASSIGN; state=OFFLINE, location=7a6e4bf70377,44607,1734375340471; forceNewPlan=false, retain=false
2024-12-16T18:55:41,792 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=9a14bb9d2ec262933cef92c96a4c2892, regionState=OPENING, regionLocation=7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:41,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 9a14bb9d2ec262933cef92c96a4c2892, server=7a6e4bf70377,44607,1734375340471}]
2024-12-16T18:55:41,946 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:41,951 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:41,951 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 9a14bb9d2ec262933cef92c96a4c2892, NAME => 'hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:55:41,951 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:55:41,952 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:55:41,952 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:55:41,952 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:55:41,953 INFO [StoreOpener-9a14bb9d2ec262933cef92c96a4c2892-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:55:41,955 INFO [StoreOpener-9a14bb9d2ec262933cef92c96a4c2892-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9a14bb9d2ec262933cef92c96a4c2892 columnFamilyName info
2024-12-16T18:55:41,955 DEBUG [StoreOpener-9a14bb9d2ec262933cef92c96a4c2892-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:41,955 INFO [StoreOpener-9a14bb9d2ec262933cef92c96a4c2892-1 {}] regionserver.HStore(327): Store=9a14bb9d2ec262933cef92c96a4c2892/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:55:41,956 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:55:41,957 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:55:41,959 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:55:41,961 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:55:41,962 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 9a14bb9d2ec262933cef92c96a4c2892; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809712, jitterRate=0.02960251271724701}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-16T18:55:41,962 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 9a14bb9d2ec262933cef92c96a4c2892:
2024-12-16T18:55:41,963 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892., pid=6, masterSystemTime=1734375341946
2024-12-16T18:55:41,965 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:41,965 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:41,966 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=9a14bb9d2ec262933cef92c96a4c2892, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:41,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-16T18:55:41,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 9a14bb9d2ec262933cef92c96a4c2892, server=7a6e4bf70377,44607,1734375340471 in 174 msec
2024-12-16T18:55:41,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-16T18:55:41,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=9a14bb9d2ec262933cef92c96a4c2892, ASSIGN in 331 msec
2024-12-16T18:55:41,973 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-16T18:55:41,973 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375341973"}]},"ts":"1734375341973"}
2024-12-16T18:55:41,975 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-16T18:55:41,987 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-16T18:55:41,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 398 msec
2024-12-16T18:55:41,992 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-16T18:55:42,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:42,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:55:42,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:55:42,016 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-16T18:55:42,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:42,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:55:42,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 39 msec
2024-12-16T18:55:42,058 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-16T18:55:42,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:55:42,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 39 msec
2024-12-16T18:55:42,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-16T18:55:42,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-16T18:55:42,139 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.617sec
2024-12-16T18:55:42,139 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-16T18:55:42,139 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-16T18:55:42,139 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-16T18:55:42,139 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-16T18:55:42,139 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-16T18:55:42,139 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42913,1734375340318-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T18:55:42,139 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42913,1734375340318-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-16T18:55:42,141 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-16T18:55:42,141 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-16T18:55:42,141 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42913,1734375340318-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:55:42,202 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76d33f2c to 127.0.0.1:55775 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2fd0fe
2024-12-16T18:55:42,216 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12139cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:55:42,218 DEBUG [hconnection-0x6c391e8d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:55:42,223 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:55:42,225 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7a6e4bf70377,42913,1734375340318
2024-12-16T18:55:42,225 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:55:42,232 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-16T18:55:42,236 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-16T18:55:42,246 INFO [RS-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46070, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-16T18:55:42,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-16T18:55:42,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-16T18:55:42,249 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-16T18:55:42,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:55:42,258 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-16T18:55:42,258 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:42,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 9
2024-12-16T18:55:42,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-16T18:55:42,261 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-16T18:55:42,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741837_1013 (size=405)
2024-12-16T18:55:42,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741837_1013 (size=405)
2024-12-16T18:55:42,292 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3bb42873d9fc46fcf7c590721e898a00, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d
2024-12-16T18:55:42,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:42,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741838_1014 (size=88)
2024-12-16T18:55:42,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741838_1014 (size=88)
2024-12-16T18:55:42,324 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:55:42,324 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1681): Closing 3bb42873d9fc46fcf7c590721e898a00, disabling compactions & flushes
2024-12-16T18:55:42,324 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:55:42,325 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:55:42,325 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00. after waiting 0 ms
2024-12-16T18:55:42,325 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:55:42,325 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:55:42,325 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3bb42873d9fc46fcf7c590721e898a00:
2024-12-16T18:55:42,326 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-16T18:55:42,327 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1734375342326"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375342326"}]},"ts":"1734375342326"}
2024-12-16T18:55:42,334 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-16T18:55:42,345 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-16T18:55:42,345 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375342345"}]},"ts":"1734375342345"}
2024-12-16T18:55:42,351 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta
2024-12-16T18:55:42,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:42,414 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3bb42873d9fc46fcf7c590721e898a00, ASSIGN}]
2024-12-16T18:55:42,415 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3bb42873d9fc46fcf7c590721e898a00, ASSIGN
2024-12-16T18:55:42,416 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3bb42873d9fc46fcf7c590721e898a00, ASSIGN; state=OFFLINE, location=7a6e4bf70377,44607,1734375340471; forceNewPlan=false, retain=false
2024-12-16T18:55:42,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-16T18:55:42,567 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3bb42873d9fc46fcf7c590721e898a00, regionState=OPENING, regionLocation=7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:42,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 3bb42873d9fc46fcf7c590721e898a00, server=7a6e4bf70377,44607,1734375340471}]
2024-12-16T18:55:42,722 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:42,726 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:55:42,726 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 3bb42873d9fc46fcf7c590721e898a00, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:55:42,726 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 3bb42873d9fc46fcf7c590721e898a00
2024-12-16T18:55:42,726 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:55:42,726 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 3bb42873d9fc46fcf7c590721e898a00
2024-12-16T18:55:42,726 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 3bb42873d9fc46fcf7c590721e898a00
2024-12-16T18:55:42,728 INFO [StoreOpener-3bb42873d9fc46fcf7c590721e898a00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3bb42873d9fc46fcf7c590721e898a00
2024-12-16T18:55:42,729 INFO [StoreOpener-3bb42873d9fc46fcf7c590721e898a00-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3bb42873d9fc46fcf7c590721e898a00 columnFamilyName info
2024-12-16T18:55:42,729 DEBUG [StoreOpener-3bb42873d9fc46fcf7c590721e898a00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:55:42,729 INFO [StoreOpener-3bb42873d9fc46fcf7c590721e898a00-1 {}] regionserver.HStore(327): Store=3bb42873d9fc46fcf7c590721e898a00/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:55:42,730 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00
2024-12-16T18:55:42,731 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00
2024-12-16T18:55:42,733 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 3bb42873d9fc46fcf7c590721e898a00
2024-12-16T18:55:42,735 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:55:42,735 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 3bb42873d9fc46fcf7c590721e898a00; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716643, jitterRate=-0.08874186873435974}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-16T18:55:42,736 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 3bb42873d9fc46fcf7c590721e898a00:
2024-12-16T18:55:42,737 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00., pid=11, masterSystemTime=1734375342721
2024-12-16T18:55:42,738 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:55:42,738 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:55:42,739 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3bb42873d9fc46fcf7c590721e898a00, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:42,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-12-16T18:55:42,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 3bb42873d9fc46fcf7c590721e898a00, server=7a6e4bf70377,44607,1734375340471 in 172 msec
2024-12-16T18:55:42,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-12-16T18:55:42,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=3bb42873d9fc46fcf7c590721e898a00, ASSIGN in 329 msec
2024-12-16T18:55:42,745 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-16T18:55:42,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375342745"}]},"ts":"1734375342745"}
2024-12-16T18:55:42,747 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-12-16T18:55:42,782 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-16T18:55:42,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 533 msec
2024-12-16T18:55:43,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:43,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:43,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:44,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:44,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:55:44,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:45,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:55:45,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:45,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:46,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:46,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:46,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:46,989 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-16T18:55:46,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:46,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:46,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:46,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:55:47,017 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-16T18:55:47,018 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-16T18:55:47,018 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-16T18:55:47,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:47,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:47,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:48,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:48,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:48,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:55:49,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:49,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:49,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:50,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:50,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:50,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:51,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:51,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:55:51,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:55:52,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
2024-12-16T18:55:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-16T18:55:52,262 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 9 completed
2024-12-16T18:55:52,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:55:52,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:55:52,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush hbase:namespace
2024-12-16T18:55:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace
2024-12-16T18:55:52,280 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_PREPARE
2024-12-16T18:55:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-16T18:55:52,281 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-16T18:55:52,282 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-16T18:55:52,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:52,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:52,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44607,1734375340471
2024-12-16T18:55:52,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44607 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-16T18:55:52,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:52,444 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 9a14bb9d2ec262933cef92c96a4c2892 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-16T18:55:52,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892/.tmp/info/1ade3a6b49b0499aa26643d315358534 is 45, key is default/info:d/1734375342021/Put/seqid=0
2024-12-16T18:55:52,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741839_1015 (size=5037)
2024-12-16T18:55:52,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741839_1015 (size=5037)
2024-12-16T18:55:52,495 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892/.tmp/info/1ade3a6b49b0499aa26643d315358534
2024-12-16T18:55:52,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892/.tmp/info/1ade3a6b49b0499aa26643d315358534 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892/info/1ade3a6b49b0499aa26643d315358534
2024-12-16T18:55:52,507 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892/info/1ade3a6b49b0499aa26643d315358534, entries=2, sequenceid=6, filesize=4.9 K
2024-12-16T18:55:52,508 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 9a14bb9d2ec262933cef92c96a4c2892 in 64ms, sequenceid=6, compaction requested=false
2024-12-16T18:55:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 9a14bb9d2ec262933cef92c96a4c2892:
2024-12-16T18:55:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:55:52,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13
2024-12-16T18:55:52,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4106): Remote procedure done, pid=13
2024-12-16T18:55:52,515 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12
2024-12-16T18:55:52,515 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 231 msec
2024-12-16T18:55:52,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace in 239 msec
2024-12-16T18:55:52,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:55:52,531 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-12-16T18:55:53,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:53,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:53,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:54,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:54,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:54,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:55,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:55,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:55,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:56,016 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region f41842edafcf2308940ff2b22b3c218f, had cached 0 bytes from a total of 23930
2024-12-16T18:55:56,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:56,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:56,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:57,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:57,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:57,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:58,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:58,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:58,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:59,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:59,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:55:59,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:56:00,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:56:00,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:56:00,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	... (identical stack trace omitted; see the 18:55:51,301 entry above)
2024-12-16T18:56:01,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:01,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:01,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:02,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:02,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T18:56:02,282 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: hbase:namespace, procId: 12 completed 2024-12-16T18:56:02,290 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-16T18:56:02,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-16T18:56:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-16T18:56:02,293 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-16T18:56:02,294 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-16T18:56:02,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-16T18:56:02,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:02,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
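The repeated traces above all have the same shape: the Close-WAL-Writer-0 thread is still retrying lease recovery on the old WAL files, but the test's mini-cluster has already shut down the underlying DFSClient, so every probe fails with java.io.IOException: Filesystem closed raised by DFSClient.checkOpen. The frames also show that RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed through java.lang.reflect.Method.invoke, which is why the real cause arrives wrapped in an InvocationTargetException. A minimal stand-alone sketch of that call shape follows; the class and method below are hypothetical illustrations, not HBase's actual RecoverLeaseFSUtils source, and only the Hadoop APIs named in the trace are real.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical probe mirroring the reflective isFileClosed call visible
    // in the stack traces above. On a DistributedFileSystem whose client has
    // been closed, m.invoke() throws an InvocationTargetException wrapping
    // the IOException("Filesystem closed") from DFSClient.checkOpen.
    public class IsFileClosedProbe {
        static boolean isFileClosed(FileSystem fs, Path wal) throws Exception {
            // Reflective lookup, consistent with the GeneratedMethodAccessor
            // and Method.invoke frames in the log.
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            try {
                return (Boolean) m.invoke(fs, wal);
            } catch (InvocationTargetException e) {
                // The wrapper is what the WARN records above log; the nested
                // cause is the real failure ("Filesystem closed").
                throw (Exception) e.getCause();
            }
        }
    }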
2024-12-16T18:56:02,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44607,1734375340471
2024-12-16T18:56:02,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44607 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-16T18:56:02,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:02,448 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 3bb42873d9fc46fcf7c590721e898a00 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-16T18:56:02,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/e26a59a8184542e19f3420085e8b9e46 is 1080, key is row0001/info:/1734375362285/Put/seqid=0
2024-12-16T18:56:02,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741840_1016 (size=6033)
2024-12-16T18:56:02,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741840_1016 (size=6033)
2024-12-16T18:56:02,482 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/e26a59a8184542e19f3420085e8b9e46
2024-12-16T18:56:02,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/e26a59a8184542e19f3420085e8b9e46 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/e26a59a8184542e19f3420085e8b9e46
2024-12-16T18:56:02,511 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/e26a59a8184542e19f3420085e8b9e46, entries=1, sequenceid=5, filesize=5.9 K
2024-12-16T18:56:02,512 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3bb42873d9fc46fcf7c590721e898a00 in 65ms, sequenceid=5, compaction requested=false
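The records above and below trace a full table flush through the master's procedure framework: the client request at 18:56:02,290 stores FlushTableProcedure pid=14, which fans out FlushRegionProcedure pid=15 to the region server, and the memstore is written out and committed as HFile e26a59a8184542e19f3420085e8b9e46 before both procedures finish. On the client side this whole sequence is driven by a single Admin.flush call; a minimal sketch follows (connection details assumed, table name taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            // Assumes an hbase-site.xml on the classpath pointing at the cluster.
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Synchronous flush: returns once the master reports the flush
                // procedure finished, matching the "Finished pid=..., state=SUCCESS;
                // ...FlushTableProcedure" records in this log.
                admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            }
        }
    }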
2024-12-16T18:56:02,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 3bb42873d9fc46fcf7c590721e898a00:
2024-12-16T18:56:02,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:02,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15
2024-12-16T18:56:02,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4106): Remote procedure done, pid=15
2024-12-16T18:56:02,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14
2024-12-16T18:56:02,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 220 msec
2024-12-16T18:56:02,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 228 msec
[The same "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" WARN keeps repeating roughly once per second per WAL file: at 18:56:03,039, 18:56:04,040, 18:56:05,040, 18:56:06,041, 18:56:07,042, and 18:56:08,042 for the MasterData WAL 7a6e4bf70377%2C36083%2C1734375217984.1734375218574; at 18:56:03,312, 18:56:04,312, 18:56:05,313, 18:56:06,314, 18:56:07,314, and 18:56:08,315 for the .meta WAL; and at 18:56:03,388, 18:56:04,388, 18:56:05,389, 18:56:06,390, 18:56:07,391, and 18:56:08,391 for 7a6e4bf70377%2C45905%2C1734375218213.1734375219221.]
2024-12-16T18:56:09,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:09,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:56:09,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:10,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
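Annotation: the trace above is worth decoding once. The frames show that RecoverLeaseFSUtils.isFileClosed calls DistributedFileSystem.isFileClosed through java.lang.reflect.Method (reflection is used so the probe degrades gracefully on Hadoop versions where the method may be absent). When the underlying DFSClient has already been shut down, checkOpen throws IOException("Filesystem closed"), which reflection wraps in an InvocationTargetException whose own message is null; the logger renders that as "InvocationTargetException: null". A minimal, self-contained sketch of the wrapping, using a stand-in class rather than HBase's actual code:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Stand-in sketch, not HBase's implementation: shows why a checked IOException
// thrown inside a reflective call surfaces as "InvocationTargetException: null".
public class ReflectiveWrapDemo {
    // Mimics a DFSClient after close(): every call fails its open-check.
    static class ClosedFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new ClosedFs(), "/wal/under/recovery");
        } catch (InvocationTargetException e) {
            System.out.println(e.getMessage()); // null, hence ": null" in the log
            System.out.println(e.getCause());   // java.io.IOException: Filesystem closed
        }
    }
}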
2024-12-16T18:56:10,290 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
[... RecoverLeaseFSUtils WARN with identical stack trace at 18:56:10,316; 10,393; 11,044; 11,317; 11,393; and 12,045 ...]
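Annotation: the FsDatasetAsyncDiskServiceFixer line above is benign. The test utility reaches into a Hadoop-internal object via reflection, and on Hadoop releases newer than 3.2.3 or 3.3.4 the private threadGroup field it looks for no longer exists, so the lookup fails with NoSuchFieldException and is merely logged (see HBASE-27595). A sketch of that failure mode, using local stand-in classes instead of the real Hadoop internals:

import java.lang.reflect.Field;

// Stand-in classes only; the real target is a Hadoop-internal service class.
public class FieldProbeDemo {
    static class OldLayout {                     // field present (older Hadoop)
        private ThreadGroup threadGroup = new ThreadGroup("async-disk");
    }
    static class NewLayout { }                   // field removed (newer Hadoop)

    static void probe(Class<?> clazz) {
        try {
            Field f = clazz.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println(clazz.getSimpleName() + ": found " + f.getName());
        } catch (NoSuchFieldException e) {
            // Like the fixer above, log and continue rather than fail the test.
            System.out.println(clazz.getSimpleName() + ": NoSuchFieldException: " + e.getMessage());
        }
    }

    public static void main(String[] args) {
        probe(OldLayout.class); // found threadGroup
        probe(NewLayout.class); // NoSuchFieldException: threadGroup
    }
}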
2024-12-16T18:56:12,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-12-16T18:56:12,294 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 14 completed
2024-12-16T18:56:12,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:56:12,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:56:12,304 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-16T18:56:12,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-12-16T18:56:12,306 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-16T18:56:12,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
[... RecoverLeaseFSUtils WARN with identical stack trace at 18:56:12,318 and 12,394 ...]
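Annotation: the pid=16/pid=17 records above and below are the master-side and region-server-side halves of a single client call. Admin.flush submits a FlushTableProcedure on the master, which fans out one FlushRegionProcedure per region and blocks the caller until the parent procedure reports done (the "Checking to see if procedure is done" polling above). A minimal client-side sketch; the table name is taken from this log, while the connection setup is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushDemo {
    public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml (or equivalent config) is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Synchronous: returns once the FlushTableProcedure completes.
            admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
    }
}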
2024-12-16T18:56:12,458 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44607,1734375340471
2024-12-16T18:56:12,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44607 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17
2024-12-16T18:56:12,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:12,460 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 3bb42873d9fc46fcf7c590721e898a00 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-16T18:56:12,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/0ac7459ac6ee4212ba92594a95d61a29 is 1080, key is row0002/info:/1734375372295/Put/seqid=0
2024-12-16T18:56:12,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741841_1017 (size=6033)
2024-12-16T18:56:12,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741841_1017 (size=6033)
2024-12-16T18:56:12,471 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/0ac7459ac6ee4212ba92594a95d61a29
2024-12-16T18:56:12,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/0ac7459ac6ee4212ba92594a95d61a29 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/0ac7459ac6ee4212ba92594a95d61a29
2024-12-16T18:56:12,492 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/0ac7459ac6ee4212ba92594a95d61a29, entries=1, sequenceid=9, filesize=5.9 K
2024-12-16T18:56:12,494 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3bb42873d9fc46fcf7c590721e898a00 in 34ms, sequenceid=9, compaction requested=false
2024-12-16T18:56:12,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 3bb42873d9fc46fcf7c590721e898a00:
2024-12-16T18:56:12,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:12,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17
2024-12-16T18:56:12,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4106): Remote procedure done, pid=17
2024-12-16T18:56:12,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16
2024-12-16T18:56:12,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec
2024-12-16T18:56:12,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec
[... RecoverLeaseFSUtils WARN with identical stack trace at 18:56:13,046; 13,319; and 13,394 ...]
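Annotation: the steady once-per-second cadence of the remaining WARNs matches the shape of a lease-recovery loop: ask the NameNode to recover the lease, poll whether it now considers the file closed, sleep, retry. A rough sketch of that shape; the structure and the 1-second constant here are illustrative assumptions, not HBase's exact implementation (RecoverLeaseFSUtils adds timeouts and reflective compatibility shims around these calls):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryLoop {
    static void recover(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean recovered = dfs.recoverLease(wal); // ask the NameNode to start recovery
        while (!recovered) {
            Thread.sleep(1000);                    // matches the ~1s spacing in this log
            // If the client has already been closed, this probe is what throws the
            // IOException("Filesystem closed") seen wrapped in the traces above.
            recovered = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
        }
    }
}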
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:13,932 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T18:56:13,933 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T18:56:14,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:56:14,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:14,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:15,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:15,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
[The Close-WAL-Writer-0 thread re-emits the identical "Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" trace shown above at roughly one-second intervals, cycling through the master WAL (...1734375218574), the meta WAL (...meta.1734375219669.meta), and the region-server WAL (...1734375219221). The duplicate trace bodies are elided from here on; only the WARN header lines are retained.]
2024-12-16T18:56:15,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:56:15,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:56:16,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:56:16,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:56:16,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:56:17,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:56:17,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:56:17,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:56:18,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:56:18,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:56:18,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:56:19,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:56:19,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:56:19,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:56:20,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:56:20,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:56:20,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:56:21,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:56:21,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:56:21,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:56:22,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:56:22,141 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-16T18:56:22,141 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-16T18:56:22,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-12-16T18:56:22,307 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 16 completed
2024-12-16T18:56:22,310 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C44607%2C1734375340471.1734375382309
2024-12-16T18:56:22,318 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375341096 with entries=13, filesize=6.41 KB; new WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375382309
2024-12-16T18:56:22,318 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44177:44177),(127.0.0.1/127.0.0.1:43743:43743)]
2024-12-16T18:56:22,319 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375341096 is not closed yet, will try archiving it next time
2024-12-16T18:56:22,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741833_1009 (size=6574)
2024-12-16T18:56:22,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741833_1009 (size=6574)
2024-12-16T18:56:22,323 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:56:22,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:56:22,324 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-16T18:56:22,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-16T18:56:22,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:56:22,325 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-16T18:56:22,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-16T18:56:22,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:56:22,477 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44607,1734375340471
2024-12-16T18:56:22,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44607 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-16T18:56:22,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:22,478 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 3bb42873d9fc46fcf7c590721e898a00 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-16T18:56:22,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/325c78f506a24269a01572dac02ca381 is 1080, key is row0003/info:/1734375382308/Put/seqid=0
2024-12-16T18:56:22,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741843_1019 (size=6033)
2024-12-16T18:56:22,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741843_1019 (size=6033)
2024-12-16T18:56:22,495 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/325c78f506a24269a01572dac02ca381
2024-12-16T18:56:22,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/325c78f506a24269a01572dac02ca381 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/325c78f506a24269a01572dac02ca381
2024-12-16T18:56:22,517 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/325c78f506a24269a01572dac02ca381, entries=1, sequenceid=13, filesize=5.9 K
2024-12-16T18:56:22,519 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3bb42873d9fc46fcf7c590721e898a00 in 40ms, sequenceid=13, compaction requested=true
2024-12-16T18:56:22,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 3bb42873d9fc46fcf7c590721e898a00:
2024-12-16T18:56:22,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:22,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19
2024-12-16T18:56:22,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4106): Remote procedure done, pid=19
2024-12-16T18:56:22,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18
2024-12-16T18:56:22,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 198 msec
2024-12-16T18:56:22,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 202 msec
2024-12-16T18:56:23,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:23,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-16T18:56:26,952 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9a14bb9d2ec262933cef92c96a4c2892, had cached 0 bytes from a total of 5037
2024-12-16T18:56:27,726 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 3bb42873d9fc46fcf7c590721e898a00, had cached 0 bytes from a total of 18099
2024-12-16T18:56:32,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-16T18:56:32,326 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 18 completed
2024-12-16T18:56:32,326 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:56:32,327 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:56:32,328 DEBUG [Time-limited test {}] regionserver.HStore(1540): 3bb42873d9fc46fcf7c590721e898a00/info is initiating minor compaction (all files)
2024-12-16T18:56:32,328 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-16T18:56:32,328 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:56:32,328 INFO [Time-limited test {}] regionserver.HRegion(2351): Starting compaction of 3bb42873d9fc46fcf7c590721e898a00/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:32,328 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/e26a59a8184542e19f3420085e8b9e46, hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/0ac7459ac6ee4212ba92594a95d61a29, hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/325c78f506a24269a01572dac02ca381] into tmpdir=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp, totalSize=17.7 K
2024-12-16T18:56:32,329 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting e26a59a8184542e19f3420085e8b9e46, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1734375362285
2024-12-16T18:56:32,330 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 0ac7459ac6ee4212ba92594a95d61a29, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1734375372295
2024-12-16T18:56:32,330 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 325c78f506a24269a01572dac02ca381, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734375382308
2024-12-16T18:56:32,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:32,345 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 3bb42873d9fc46fcf7c590721e898a00#info#compaction#32 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:56:32,345 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/707ea652b14848c7a9b02c447d02cc9e is 1080, key is row0001/info:/1734375362285/Put/seqid=0
2024-12-16T18:56:32,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741844_1020 (size=8296)
2024-12-16T18:56:32,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741844_1020 (size=8296)
2024-12-16T18:56:32,360 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/707ea652b14848c7a9b02c447d02cc9e as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/707ea652b14848c7a9b02c447d02cc9e
2024-12-16T18:56:32,368 INFO [Time-limited test {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3bb42873d9fc46fcf7c590721e898a00/info of 3bb42873d9fc46fcf7c590721e898a00 into 707ea652b14848c7a9b02c447d02cc9e(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:56:32,368 DEBUG [Time-limited test {}] regionserver.HRegion(2381): Compaction status journal for 3bb42873d9fc46fcf7c590721e898a00:
2024-12-16T18:56:32,370 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C44607%2C1734375340471.1734375392370
2024-12-16T18:56:32,380 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375382309 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375392370
2024-12-16T18:56:32,380 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43743:43743),(127.0.0.1/127.0.0.1:44177:44177)]
2024-12-16T18:56:32,380 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375382309 is not closed yet, will try archiving it next time
2024-12-16T18:56:32,381 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375341096 to hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/oldWALs/7a6e4bf70377%2C44607%2C1734375340471.1734375341096
2024-12-16T18:56:32,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741842_1018 (size=2520)
2024-12-16T18:56:32,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741842_1018 (size=2520)
2024-12-16T18:56:32,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:56:32,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:56:32,388 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-16T18:56:32,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-16T18:56:32,392 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-16T18:56:32,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-16T18:56:32,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:32,546 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44607,1734375340471
2024-12-16T18:56:32,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44607 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21
2024-12-16T18:56:32,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:32,547 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 3bb42873d9fc46fcf7c590721e898a00 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-16T18:56:32,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/c0005b1771fe45b78e33b744fd06ea56 is 1080, key is row0000/info:/1734375392369/Put/seqid=0
2024-12-16T18:56:32,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741846_1022 (size=6033)
2024-12-16T18:56:32,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741846_1022 (size=6033)
2024-12-16T18:56:32,563 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/c0005b1771fe45b78e33b744fd06ea56
2024-12-16T18:56:32,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/c0005b1771fe45b78e33b744fd06ea56 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/c0005b1771fe45b78e33b744fd06ea56
2024-12-16T18:56:32,577 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/c0005b1771fe45b78e33b744fd06ea56, entries=1, sequenceid=18, filesize=5.9 K
2024-12-16T18:56:32,578 INFO [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3bb42873d9fc46fcf7c590721e898a00 in 31ms, sequenceid=18, compaction requested=false
2024-12-16T18:56:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 3bb42873d9fc46fcf7c590721e898a00:
2024-12-16T18:56:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21
2024-12-16T18:56:32,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.HMaster(4106): Remote procedure done, pid=21
2024-12-16T18:56:32,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20
2024-12-16T18:56:32,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 187 msec
2024-12-16T18:56:32,582 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec
2024-12-16T18:56:33,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:33,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:33,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:34,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:34,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:34,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:35,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:35,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:35,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:36,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:36,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:36,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:37,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:37,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:37,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:38,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:38,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:38,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:39,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:39,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:39,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null [identical stack trace omitted; root cause: java.io.IOException: Filesystem closed]
2024-12-16T18:56:40,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
2024-12-16T18:56:40,291 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T18:56:40,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:56:40,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:56:41,016 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region f41842edafcf2308940ff2b22b3c218f, had cached 0 bytes from a total of 23930
2024-12-16T18:56:41,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:56:41,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:56:41,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:56:42,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:56:42,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:56:42,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-16T18:56:42,392 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 20 completed
2024-12-16T18:56:42,395 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C44607%2C1734375340471.1734375402395
2024-12-16T18:56:42,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:56:42,420 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375392370 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375402395
2024-12-16T18:56:42,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44177:44177),(127.0.0.1/127.0.0.1:43743:43743)]
2024-12-16T18:56:42,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375392370 is not closed yet, will try archiving it next time
2024-12-16T18:56:42,420 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471/7a6e4bf70377%2C44607%2C1734375340471.1734375382309 to hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/oldWALs/7a6e4bf70377%2C44607%2C1734375340471.1734375382309
2024-12-16T18:56:42,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-16T18:56:42,421 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-16T18:56:42,421 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76d33f2c to 127.0.0.1:55775
2024-12-16T18:56:42,421 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:56:42,421 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-16T18:56:42,421 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1726613129, stopped=false
2024-12-16T18:56:42,421 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7a6e4bf70377,42913,1734375340318
2024-12-16T18:56:42,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741845_1021 (size=2026)
2024-12-16T18:56:42,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741845_1021 (size=2026)
2024-12-16T18:56:42,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:56:42,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:56:42,428 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-16T18:56:42,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:56:42,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:56:42,428 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:56:42,428 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,44607,1734375340471' *****
2024-12-16T18:56:42,428 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-16T18:56:42,429 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-16T18:56:42,429 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:56:42,429 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:56:42,429 INFO [RS:0;7a6e4bf70377:44607 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-16T18:56:42,429 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-16T18:56:42,429 INFO [RS:0;7a6e4bf70377:44607 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-16T18:56:42,430 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(3579): Received CLOSE for 3bb42873d9fc46fcf7c590721e898a00
2024-12-16T18:56:42,430 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(3579): Received CLOSE for 9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:56:42,430 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,44607,1734375340471
2024-12-16T18:56:42,430 DEBUG [RS:0;7a6e4bf70377:44607 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:56:42,430 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3bb42873d9fc46fcf7c590721e898a00, disabling compactions & flushes
2024-12-16T18:56:42,430 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:42,430 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-16T18:56:42,430 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-16T18:56:42,430 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:42,430 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-16T18:56:42,430 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00. after waiting 0 ms
2024-12-16T18:56:42,430 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:42,430 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-16T18:56:42,430 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3bb42873d9fc46fcf7c590721e898a00 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-16T18:56:42,430 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-16T18:56:42,430 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 3bb42873d9fc46fcf7c590721e898a00=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00., 9a14bb9d2ec262933cef92c96a4c2892=hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.}
2024-12-16T18:56:42,430 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:56:42,431 DEBUG [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3bb42873d9fc46fcf7c590721e898a00, 9a14bb9d2ec262933cef92c96a4c2892
2024-12-16T18:56:42,431 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:56:42,431 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:56:42,431 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:56:42,431 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:56:42,431 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=3.05 KB heapSize=5.55 KB
2024-12-16T18:56:42,436 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/44085fb098c24ae2b9a326217fbda45d is 1080, key is row0001/info:/1734375402393/Put/seqid=0
2024-12-16T18:56:42,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741848_1024 (size=6033)
2024-12-16T18:56:42,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741848_1024 (size=6033)
2024-12-16T18:56:42,444 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/44085fb098c24ae2b9a326217fbda45d
2024-12-16T18:56:42,451 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/.tmp/info/088a3aaef4904b9c8a147a9e05973e8b is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00./info:regioninfo/1734375342739/Put/seqid=0
2024-12-16T18:56:42,454 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/.tmp/info/44085fb098c24ae2b9a326217fbda45d as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/44085fb098c24ae2b9a326217fbda45d
2024-12-16T18:56:42,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741849_1025 (size=8430)
2024-12-16T18:56:42,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741849_1025 (size=8430)
2024-12-16T18:56:42,459 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/.tmp/info/088a3aaef4904b9c8a147a9e05973e8b
2024-12-16T18:56:42,461 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/44085fb098c24ae2b9a326217fbda45d, entries=1, sequenceid=22, filesize=5.9 K
2024-12-16T18:56:42,461 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 3bb42873d9fc46fcf7c590721e898a00 in 31ms, sequenceid=22, compaction requested=true
2024-12-16T18:56:42,462 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/e26a59a8184542e19f3420085e8b9e46, hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/0ac7459ac6ee4212ba92594a95d61a29, hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/325c78f506a24269a01572dac02ca381] to archive
2024-12-16T18:56:42,463 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.-1 {}] backup.HFileArchiver(363): Archiving compacted files.
2024-12-16T18:56:42,465 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/e26a59a8184542e19f3420085e8b9e46 to hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/e26a59a8184542e19f3420085e8b9e46
2024-12-16T18:56:42,466 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/0ac7459ac6ee4212ba92594a95d61a29 to hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/0ac7459ac6ee4212ba92594a95d61a29
2024-12-16T18:56:42,466 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/325c78f506a24269a01572dac02ca381 to hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/info/325c78f506a24269a01572dac02ca381
2024-12-16T18:56:42,470 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/3bb42873d9fc46fcf7c590721e898a00/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1
2024-12-16T18:56:42,471 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:42,471 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3bb42873d9fc46fcf7c590721e898a00:
2024-12-16T18:56:42,471 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734375342248.3bb42873d9fc46fcf7c590721e898a00.
2024-12-16T18:56:42,471 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9a14bb9d2ec262933cef92c96a4c2892, disabling compactions & flushes
2024-12-16T18:56:42,471 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:56:42,471 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:56:42,471 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892. after waiting 0 ms
2024-12-16T18:56:42,471 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:56:42,475 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/namespace/9a14bb9d2ec262933cef92c96a4c2892/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-16T18:56:42,476 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:56:42,476 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9a14bb9d2ec262933cef92c96a4c2892:
2024-12-16T18:56:42,476 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734375341590.9a14bb9d2ec262933cef92c96a4c2892.
2024-12-16T18:56:42,480 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/.tmp/table/88a15603e15542bf9560bf0e21153203 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1734375342745/Put/seqid=0
2024-12-16T18:56:42,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741850_1026 (size=5532)
2024-12-16T18:56:42,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741850_1026 (size=5532)
2024-12-16T18:56:42,485 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=264 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/.tmp/table/88a15603e15542bf9560bf0e21153203
2024-12-16T18:56:42,492 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/.tmp/info/088a3aaef4904b9c8a147a9e05973e8b as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/info/088a3aaef4904b9c8a147a9e05973e8b
2024-12-16T18:56:42,498 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/info/088a3aaef4904b9c8a147a9e05973e8b, entries=20, sequenceid=14, filesize=8.2 K
2024-12-16T18:56:42,499 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/.tmp/table/88a15603e15542bf9560bf0e21153203 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/table/88a15603e15542bf9560bf0e21153203
2024-12-16T18:56:42,505 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/table/88a15603e15542bf9560bf0e21153203, entries=4, sequenceid=14, filesize=5.4 K
2024-12-16T18:56:42,506 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~3.05 KB/3122, heapSize ~5.27 KB/5400, currentSize=0 B/0 for 1588230740 in 75ms, sequenceid=14, compaction requested=false
2024-12-16T18:56:42,510 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1
2024-12-16T18:56:42,511 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-16T18:56:42,511 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:56:42,512 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:56:42,512 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-16T18:56:42,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-16T18:56:42,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-16T18:56:42,631 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,44607,1734375340471; all regions closed.
2024-12-16T18:56:42,631 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471
2024-12-16T18:56:42,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741834_1010 (size=4570)
2024-12-16T18:56:42,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741834_1010 (size=4570)
2024-12-16T18:56:42,635 DEBUG [RS:0;7a6e4bf70377:44607 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/oldWALs
2024-12-16T18:56:42,635 INFO [RS:0;7a6e4bf70377:44607 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C44607%2C1734375340471.meta:.meta(num 1734375341470)
2024-12-16T18:56:42,635 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/WALs/7a6e4bf70377,44607,1734375340471
2024-12-16T18:56:42,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741847_1023 (size=1545)
2024-12-16T18:56:42,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741847_1023 (size=1545)
2024-12-16T18:56:42,639 DEBUG [RS:0;7a6e4bf70377:44607 {}] wal.AbstractFSWAL(1071): Moved 2 WAL file(s) to /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/oldWALs
2024-12-16T18:56:42,639 INFO [RS:0;7a6e4bf70377:44607 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C44607%2C1734375340471:(num 1734375402395)
2024-12-16T18:56:42,639 DEBUG [RS:0;7a6e4bf70377:44607 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:56:42,639 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:56:42,640 INFO [RS:0;7a6e4bf70377:44607 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-16T18:56:42,640 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:56:42,640 INFO [RS:0;7a6e4bf70377:44607 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44607
2024-12-16T18:56:42,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,44607,1734375340471
2024-12-16T18:56:42,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:56:42,659 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,44607,1734375340471]
2024-12-16T18:56:42,659 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,44607,1734375340471; numProcessing=1
2024-12-16T18:56:42,667 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,44607,1734375340471 already deleted, retry=false
2024-12-16T18:56:42,667 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,44607,1734375340471 expired; onlineServers=0
2024-12-16T18:56:42,667 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,42913,1734375340318' *****
2024-12-16T18:56:42,667 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-16T18:56:42,668 DEBUG [M:0;7a6e4bf70377:42913 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9e19ccb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:56:42,668 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,42913,1734375340318
2024-12-16T18:56:42,668 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,42913,1734375340318; all regions closed.
2024-12-16T18:56:42,668 DEBUG [M:0;7a6e4bf70377:42913 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:56:42,668 DEBUG [M:0;7a6e4bf70377:42913 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-16T18:56:42,668 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-16T18:56:42,668 DEBUG [M:0;7a6e4bf70377:42913 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-16T18:56:42,668 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375340826 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375340826,5,FailOnTimeoutGroup]
2024-12-16T18:56:42,668 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375340822 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375340822,5,FailOnTimeoutGroup]
2024-12-16T18:56:42,668 INFO [M:0;7a6e4bf70377:42913 {}] hbase.ChoreService(370): Chore service for: master/7a6e4bf70377:0 had [] on shutdown
2024-12-16T18:56:42,669 DEBUG [M:0;7a6e4bf70377:42913 {}] master.HMaster(1733): Stopping service threads
2024-12-16T18:56:42,669 INFO [M:0;7a6e4bf70377:42913 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-16T18:56:42,669 INFO [M:0;7a6e4bf70377:42913 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-16T18:56:42,669 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-16T18:56:42,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-16T18:56:42,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:56:42,678 DEBUG [M:0;7a6e4bf70377:42913 {}] zookeeper.ZKUtil(347): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-16T18:56:42,678 WARN [M:0;7a6e4bf70377:42913 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-16T18:56:42,678 INFO [M:0;7a6e4bf70377:42913 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-16T18:56:42,679 INFO [M:0;7a6e4bf70377:42913 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-16T18:56:42,679 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:56:42,679 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:56:42,679 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:56:42,679 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:56:42,679 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:56:42,679 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:56:42,679 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65.10 KB heapSize=81.72 KB
2024-12-16T18:56:42,695 DEBUG [M:0;7a6e4bf70377:42913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc70750c625448b8f744baa66aa1dc0 is 82, key is hbase:meta,,1/info:regioninfo/1734375341513/Put/seqid=0
2024-12-16T18:56:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741851_1027 (size=5672)
2024-12-16T18:56:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741851_1027 (size=5672)
2024-12-16T18:56:42,759 INFO [RS:0;7a6e4bf70377:44607 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,44607,1734375340471; zookeeper connection closed.
2024-12-16T18:56:42,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:56:42,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44607-0x100e45577bf0001, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:56:42,760 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69b140f2 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69b140f2
2024-12-16T18:56:42,760 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-16T18:56:42,962 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:56:43,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:43,103 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc70750c625448b8f744baa66aa1dc0 2024-12-16T18:56:43,122 DEBUG [M:0;7a6e4bf70377:42913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b576181142734da891517b3751277419 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734375342785/Put/seqid=0 2024-12-16T18:56:43,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741852_1028 (size=8358) 2024-12-16T18:56:43,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741852_1028 (size=8358) 2024-12-16T18:56:43,127 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.49 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b576181142734da891517b3751277419 2024-12-16T18:56:43,131 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b576181142734da891517b3751277419 2024-12-16T18:56:43,145 DEBUG [M:0;7a6e4bf70377:42913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac7586d1dad4468a8ffe5c1887f4a873 is 69, key is 7a6e4bf70377,44607,1734375340471/rs:state/1734375340937/Put/seqid=0 2024-12-16T18:56:43,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741853_1029 (size=5156) 2024-12-16T18:56:43,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741853_1029 (size=5156) 2024-12-16T18:56:43,150 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=184 (bloomFilter=true), 
to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac7586d1dad4468a8ffe5c1887f4a873 2024-12-16T18:56:43,167 DEBUG [M:0;7a6e4bf70377:42913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/62e6b5b912cb4733b1dde4c80b9a4440 is 52, key is load_balancer_on/state:d/1734375342226/Put/seqid=0 2024-12-16T18:56:43,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741854_1030 (size=5056) 2024-12-16T18:56:43,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741854_1030 (size=5056) 2024-12-16T18:56:43,172 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/62e6b5b912cb4733b1dde4c80b9a4440 2024-12-16T18:56:43,176 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc70750c625448b8f744baa66aa1dc0 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4dc70750c625448b8f744baa66aa1dc0 2024-12-16T18:56:43,181 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=4, created chunk count=11, reused chunk count=31, reuseRatio=73.81% 2024-12-16T18:56:43,181 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4dc70750c625448b8f744baa66aa1dc0, entries=8, sequenceid=184, filesize=5.5 K 2024-12-16T18:56:43,181 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-16T18:56:43,182 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b576181142734da891517b3751277419 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b576181142734da891517b3751277419 2024-12-16T18:56:43,187 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b576181142734da891517b3751277419 2024-12-16T18:56:43,187 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b576181142734da891517b3751277419, entries=21, sequenceid=184, filesize=8.2 K 2024-12-16T18:56:43,188 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac7586d1dad4468a8ffe5c1887f4a873 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac7586d1dad4468a8ffe5c1887f4a873 2024-12-16T18:56:43,193 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac7586d1dad4468a8ffe5c1887f4a873, entries=1, sequenceid=184, filesize=5.0 K 2024-12-16T18:56:43,194 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/62e6b5b912cb4733b1dde4c80b9a4440 as hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/62e6b5b912cb4733b1dde4c80b9a4440 2024-12-16T18:56:43,199 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43947/user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/62e6b5b912cb4733b1dde4c80b9a4440, entries=1, sequenceid=184, filesize=4.9 K 2024-12-16T18:56:43,200 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(3040): Finished flush of dataSize ~65.10 KB/66658, heapSize ~81.66 KB/83616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 521ms, sequenceid=184, compaction requested=false 2024-12-16T18:56:43,201 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:56:43,201 DEBUG [M:0;7a6e4bf70377:42913 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T18:56:43,202 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/55b00984-e867-7a09-9bb4-3a7b80a76d2d/MasterData/WALs/7a6e4bf70377,42913,1734375340318 2024-12-16T18:56:43,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36025 is added to blk_1073741830_1006 (size=79179) 2024-12-16T18:56:43,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741830_1006 (size=79179) 2024-12-16T18:56:43,204 INFO [M:0;7a6e4bf70377:42913 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-16T18:56:43,204 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-16T18:56:43,204 INFO [M:0;7a6e4bf70377:42913 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42913 2024-12-16T18:56:43,237 DEBUG [M:0;7a6e4bf70377:42913 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7a6e4bf70377,42913,1734375340318 already deleted, retry=false 2024-12-16T18:56:43,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:43,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:56:43,346 INFO [M:0;7a6e4bf70377:42913 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,42913,1734375340318; zookeeper connection closed. 
2024-12-16T18:56:43,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42913-0x100e45577bf0000, quorum=127.0.0.1:55775, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:56:43,349 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@206aa743{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:56:43,350 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5279ded4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T18:56:43,350 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T18:56:43,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31c93d20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T18:56:43,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@581d3af9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.log.dir/,STOPPED} 2024-12-16T18:56:43,353 WARN [BP-304968768-172.17.0.2-1734375338081 heartbeating to localhost/127.0.0.1:43947 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T18:56:43,353 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-16T18:56:43,353 WARN [BP-304968768-172.17.0.2-1734375338081 heartbeating to localhost/127.0.0.1:43947 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-304968768-172.17.0.2-1734375338081 (Datanode Uuid dff0078f-140f-4c23-ac07-60e2185a30e9) service to localhost/127.0.0.1:43947 2024-12-16T18:56:43,353 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T18:56:43,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/dfs/data/data3/current/BP-304968768-172.17.0.2-1734375338081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:56:43,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/dfs/data/data4/current/BP-304968768-172.17.0.2-1734375338081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:56:43,355 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T18:56:43,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@102c13ed{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:56:43,359 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1400588a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T18:56:43,359 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T18:56:43,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e333cd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T18:56:43,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48a21b91{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.log.dir/,STOPPED} 2024-12-16T18:56:43,360 WARN [BP-304968768-172.17.0.2-1734375338081 heartbeating to localhost/127.0.0.1:43947 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T18:56:43,360 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-16T18:56:43,360 WARN [BP-304968768-172.17.0.2-1734375338081 heartbeating to localhost/127.0.0.1:43947 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-304968768-172.17.0.2-1734375338081 (Datanode Uuid 370b6431-714e-4987-a944-9f1f7ed2f740) service to localhost/127.0.0.1:43947 2024-12-16T18:56:43,360 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T18:56:43,361 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/dfs/data/data1/current/BP-304968768-172.17.0.2-1734375338081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:56:43,361 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/cluster_33e4e342-1004-fec4-a917-1c21cc45ed7c/dfs/data/data2/current/BP-304968768-172.17.0.2-1734375338081 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T18:56:43,361 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T18:56:43,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6843989b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-16T18:56:43,367 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20f985a7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T18:56:43,367 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T18:56:43,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8dbcd51{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T18:56:43,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@787f6765{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.log.dir/,STOPPED} 2024-12-16T18:56:43,373 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-16T18:56:43,393 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-16T18:56:43,399 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=112 (was 99) - Thread LEAK? -, OpenFileDescriptor=466 (was 448) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=266 (was 327), ProcessCount=11 (was 11), AvailableMemoryMB=3861 (was 4744) 2024-12-16T18:56:43,407 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=112, OpenFileDescriptor=466, MaxFileDescriptor=1048576, SystemLoadAverage=266, ProcessCount=11, AvailableMemoryMB=3861 2024-12-16T18:56:43,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-16T18:56:43,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.log.dir so I do NOT create it in target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc 2024-12-16T18:56:43,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1332edb6-9b51-da50-cbd2-44a1ddc2c24d/hadoop.tmp.dir so I do NOT create it in target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc 2024-12-16T18:56:43,407 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1, deleteOnExit=true 2024-12-16T18:56:43,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-16T18:56:43,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/test.cache.data in system properties and HBase conf 2024-12-16T18:56:43,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.tmp.dir in system properties and HBase conf 2024-12-16T18:56:43,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.log.dir in system properties and HBase conf 2024-12-16T18:56:43,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-16T18:56:43,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-16T18:56:43,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-16T18:56:43,408 DEBUG [Time-limited test {}] fs.HFileSystem(310): The 
file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-16T18:56:43,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-16T18:56:43,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/nfs.dump.dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/java.io.tmpdir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-16T18:56:43,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-16T18:56:43,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:43,423 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
2024-12-16T18:56:44,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:44,245 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:56:44,248 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T18:56:44,250 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T18:56:44,250 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T18:56:44,250 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T18:56:44,250 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:56:44,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@650dbc9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.log.dir/,AVAILABLE} 2024-12-16T18:56:44,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13ef0a6a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T18:56:44,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63986ce6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/java.io.tmpdir/jetty-localhost-35343-hadoop-hdfs-3_4_1-tests_jar-_-any-240101736886289972/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-16T18:56:44,340 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29aa261{HTTP/1.1, (http/1.1)}{localhost:35343} 2024-12-16T18:56:44,340 INFO [Time-limited test {}] server.Server(415): Started @308707ms 2024-12-16T18:56:44,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:44,350 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-16T18:56:44,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:44,544 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:56:44,547 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T18:56:44,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T18:56:44,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T18:56:44,548 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T18:56:44,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@436616ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.log.dir/,AVAILABLE} 2024-12-16T18:56:44,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ab6d217{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T18:56:44,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26ac97a6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/java.io.tmpdir/jetty-localhost-39025-hadoop-hdfs-3_4_1-tests_jar-_-any-6288104847782147281/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:56:44,639 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f52bd0{HTTP/1.1, (http/1.1)}{localhost:39025} 2024-12-16T18:56:44,639 INFO [Time-limited test {}] server.Server(415): Started @309005ms 2024-12-16T18:56:44,640 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-16T18:56:44,671 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T18:56:44,674 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T18:56:44,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T18:56:44,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T18:56:44,677 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T18:56:44,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@499b8a67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.log.dir/,AVAILABLE} 2024-12-16T18:56:44,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5963885b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-16T18:56:44,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@202ada42{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/java.io.tmpdir/jetty-localhost-35293-hadoop-hdfs-3_4_1-tests_jar-_-any-1853413956102053620/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:56:44,778 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@379c0c1f{HTTP/1.1, (http/1.1)}{localhost:35293} 2024-12-16T18:56:44,778 INFO [Time-limited test {}] server.Server(415): Started @309145ms 2024-12-16T18:56:44,779 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-16T18:56:45,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:45,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:45,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:45,483 WARN [Thread-1740 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/dfs/data/data1/current/BP-606476777-172.17.0.2-1734375403429/current, will proceed with Du for space computation calculation, 2024-12-16T18:56:45,483 WARN [Thread-1741 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/dfs/data/data2/current/BP-606476777-172.17.0.2-1734375403429/current, will proceed with Du for space computation calculation, 2024-12-16T18:56:45,500 WARN [Thread-1704 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-16T18:56:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff400506ba46907c with lease ID 0x7b7d367b291fbfe0: Processing first storage report for DS-2b7fa7e2-7e45-444a-8cf8-26d0e05c6abf from datanode DatanodeRegistration(127.0.0.1:34265, datanodeUuid=21f77a1b-dc99-4c51-addf-0571c39b983e, infoPort=38895, infoSecurePort=0, ipcPort=34215, storageInfo=lv=-57;cid=testClusterID;nsid=3254744;c=1734375403429) 2024-12-16T18:56:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff400506ba46907c with lease ID 0x7b7d367b291fbfe0: from storage DS-2b7fa7e2-7e45-444a-8cf8-26d0e05c6abf node DatanodeRegistration(127.0.0.1:34265, datanodeUuid=21f77a1b-dc99-4c51-addf-0571c39b983e, infoPort=38895, infoSecurePort=0, ipcPort=34215, storageInfo=lv=-57;cid=testClusterID;nsid=3254744;c=1734375403429), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-16T18:56:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff400506ba46907c with lease ID 0x7b7d367b291fbfe0: Processing first storage report for DS-71a01488-c4cf-4553-a94f-d3416310eada from datanode DatanodeRegistration(127.0.0.1:34265, datanodeUuid=21f77a1b-dc99-4c51-addf-0571c39b983e, infoPort=38895, infoSecurePort=0, ipcPort=34215, storageInfo=lv=-57;cid=testClusterID;nsid=3254744;c=1734375403429) 2024-12-16T18:56:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff400506ba46907c with lease ID 0x7b7d367b291fbfe0: from storage DS-71a01488-c4cf-4553-a94f-d3416310eada node DatanodeRegistration(127.0.0.1:34265, datanodeUuid=21f77a1b-dc99-4c51-addf-0571c39b983e, infoPort=38895, infoSecurePort=0, ipcPort=34215, storageInfo=lv=-57;cid=testClusterID;nsid=3254744;c=1734375403429), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-16T18:56:45,682 WARN [Thread-1751 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/dfs/data/data3/current/BP-606476777-172.17.0.2-1734375403429/current, will proceed with Du for space computation calculation, 2024-12-16T18:56:45,683 WARN [Thread-1752 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/dfs/data/data4/current/BP-606476777-172.17.0.2-1734375403429/current, will proceed with Du for space computation calculation, 2024-12-16T18:56:45,701 WARN [Thread-1727 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-16T18:56:45,703 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd195f9c40524e9b0 with lease ID 0x7b7d367b291fbfe1: Processing first storage report for DS-384cb082-8777-44de-8870-76f97f0c1b84 from datanode DatanodeRegistration(127.0.0.1:36229, datanodeUuid=6af1cc1c-3469-418d-b44a-b91b4334daae, infoPort=41819, infoSecurePort=0, ipcPort=32889, storageInfo=lv=-57;cid=testClusterID;nsid=3254744;c=1734375403429) 2024-12-16T18:56:45,703 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd195f9c40524e9b0 with lease ID 0x7b7d367b291fbfe1: from storage DS-384cb082-8777-44de-8870-76f97f0c1b84 node DatanodeRegistration(127.0.0.1:36229, datanodeUuid=6af1cc1c-3469-418d-b44a-b91b4334daae, infoPort=41819, infoSecurePort=0, ipcPort=32889, storageInfo=lv=-57;cid=testClusterID;nsid=3254744;c=1734375403429), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-16T18:56:45,703 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd195f9c40524e9b0 with lease ID 0x7b7d367b291fbfe1: Processing first storage report for DS-8e7b01ad-56ef-4aaa-bf42-60a28519db69 from datanode DatanodeRegistration(127.0.0.1:36229, datanodeUuid=6af1cc1c-3469-418d-b44a-b91b4334daae, infoPort=41819, infoSecurePort=0, ipcPort=32889, storageInfo=lv=-57;cid=testClusterID;nsid=3254744;c=1734375403429) 2024-12-16T18:56:45,703 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd195f9c40524e9b0 with lease ID 0x7b7d367b291fbfe1: from storage DS-8e7b01ad-56ef-4aaa-bf42-60a28519db69 node DatanodeRegistration(127.0.0.1:36229, datanodeUuid=6af1cc1c-3469-418d-b44a-b91b4334daae, infoPort=41819, infoSecurePort=0, ipcPort=32889, storageInfo=lv=-57;cid=testClusterID;nsid=3254744;c=1734375403429), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-16T18:56:45,721 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc 2024-12-16T18:56:45,724 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/zookeeper_0, clientPort=62808, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-16T18:56:45,725 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=62808 2024-12-16T18:56:45,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:56:45,727 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:56:45,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741825_1001 (size=7) 2024-12-16T18:56:45,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741825_1001 (size=7) 2024-12-16T18:56:45,737 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a with version=8 2024-12-16T18:56:45,738 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/hbase-staging 2024-12-16T18:56:45,739 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7a6e4bf70377:0 server-side Connection retries=45 2024-12-16T18:56:45,739 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T18:56:45,740 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-16T18:56:45,740 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-16T18:56:45,740 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T18:56:45,740 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-16T18:56:45,740 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-16T18:56:45,740 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-16T18:56:45,740 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34203 2024-12-16T18:56:45,741 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:56:45,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:56:45,744 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34203 connecting to ZooKeeper ensemble=127.0.0.1:62808 2024-12-16T18:56:45,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:342030x0, quorum=127.0.0.1:62808, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-16T18:56:45,803 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34203-0x100e45677550000 connected 2024-12-16T18:56:45,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T18:56:45,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T18:56:45,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-16T18:56:45,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34203 2024-12-16T18:56:45,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34203 2024-12-16T18:56:45,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34203 2024-12-16T18:56:45,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34203 2024-12-16T18:56:45,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34203 2024-12-16T18:56:45,865 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a, hbase.cluster.distributed=false 2024-12-16T18:56:45,882 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45 2024-12-16T18:56:45,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T18:56:45,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-16T18:56:45,882 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-16T18:56:45,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-16T18:56:45,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-16T18:56:45,882 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-16T18:56:45,883 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-16T18:56:45,883 INFO [Time-limited 
test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42969 2024-12-16T18:56:45,884 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-16T18:56:45,884 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-16T18:56:45,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:56:45,887 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:56:45,889 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42969 connecting to ZooKeeper ensemble=127.0.0.1:62808 2024-12-16T18:56:45,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429690x0, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-16T18:56:45,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:429690x0, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T18:56:45,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42969-0x100e45677550001 connected 2024-12-16T18:56:45,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T18:56:45,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-16T18:56:45,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42969 2024-12-16T18:56:45,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42969 2024-12-16T18:56:45,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42969 2024-12-16T18:56:45,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42969 2024-12-16T18:56:45,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42969 2024-12-16T18:56:45,899 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7a6e4bf70377,34203,1734375405739 2024-12-16T18:56:45,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T18:56:45,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T18:56:45,909 DEBUG 
[master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a6e4bf70377,34203,1734375405739 2024-12-16T18:56:45,910 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a6e4bf70377:34203 2024-12-16T18:56:45,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T18:56:45,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T18:56:45,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:45,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:45,920 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-16T18:56:45,920 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a6e4bf70377,34203,1734375405739 from backup master directory 2024-12-16T18:56:45,921 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-16T18:56:45,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a6e4bf70377,34203,1734375405739 2024-12-16T18:56:45,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T18:56:45,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T18:56:45,928 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-16T18:56:45,928 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a6e4bf70377,34203,1734375405739 2024-12-16T18:56:45,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741826_1002 (size=42) 2024-12-16T18:56:45,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741826_1002 (size=42) 2024-12-16T18:56:45,945 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/hbase.id with ID: 116de6d9-c58e-492f-b7ec-fe287b3f720a 2024-12-16T18:56:45,958 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:56:45,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:45,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:45,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741827_1003 (size=196) 2024-12-16T18:56:45,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741827_1003 (size=196) 2024-12-16T18:56:45,977 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T18:56:45,978 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, 
flushIntervalMs=900000 2024-12-16T18:56:45,978 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-16T18:56:45,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741828_1004 (size=1189) 2024-12-16T18:56:45,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741828_1004 (size=1189) 2024-12-16T18:56:45,986 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store 2024-12-16T18:56:45,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741829_1005 (size=34) 2024-12-16T18:56:45,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741829_1005 (size=34) 2024-12-16T18:56:45,994 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:56:45,994 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T18:56:45,994 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:56:45,994 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-16T18:56:45,994 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T18:56:45,994 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:56:45,994 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:56:45,994 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T18:56:45,995 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/.initializing 2024-12-16T18:56:45,995 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/WALs/7a6e4bf70377,34203,1734375405739 2024-12-16T18:56:45,997 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C34203%2C1734375405739, suffix=, logDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/WALs/7a6e4bf70377,34203,1734375405739, archiveDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/oldWALs, maxLogs=10 2024-12-16T18:56:45,998 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C34203%2C1734375405739.1734375405998 2024-12-16T18:56:46,003 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/WALs/7a6e4bf70377,34203,1734375405739/7a6e4bf70377%2C34203%2C1734375405739.1734375405998 2024-12-16T18:56:46,003 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41819:41819),(127.0.0.1/127.0.0.1:38895:38895)] 2024-12-16T18:56:46,004 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-16T18:56:46,004 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:56:46,004 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,004 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 
1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-16T18:56:46,008 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T18:56:46,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-16T18:56:46,011 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,012 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:56:46,012 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-16T18:56:46,014 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:56:46,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-16T18:56:46,017 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:56:46,018 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,019 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,021 DEBUG 
[master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-16T18:56:46,022 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T18:56:46,024 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T18:56:46,024 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755686, jitterRate=-0.039096564054489136}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-16T18:56:46,025 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T18:56:46,025 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-16T18:56:46,028 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2914339e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T18:56:46,029 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-16T18:56:46,029 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-16T18:56:46,029 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-16T18:56:46,030 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-16T18:56:46,030 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-16T18:56:46,030 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-16T18:56:46,030 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-16T18:56:46,032 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-16T18:56:46,033 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-16T18:56:46,042 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-16T18:56:46,042 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-16T18:56:46,043 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-16T18:56:46,053 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-16T18:56:46,053 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-16T18:56:46,054 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-16T18:56:46,061 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-16T18:56:46,062 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-16T18:56:46,070 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-16T18:56:46,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:46,071 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-16T18:56:46,078 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-16T18:56:46,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T18:56:46,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T18:56:46,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:46,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:46,087 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7a6e4bf70377,34203,1734375405739, sessionid=0x100e45677550000, setting cluster-up flag (Was=false) 2024-12-16T18:56:46,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:46,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:46,128 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-16T18:56:46,129 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,34203,1734375405739 2024-12-16T18:56:46,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:46,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:46,203 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-16T18:56:46,204 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,34203,1734375405739 2024-12-16T18:56:46,206 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-16T18:56:46,207 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-16T18:56:46,207 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a6e4bf70377,34203,1734375405739 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a6e4bf70377:0, corePoolSize=10, maxPoolSize=10 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2 2024-12-16T18:56:46,207 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,208 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734375436208 2024-12-16T18:56:46,208 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-16T18:56:46,208 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-16T18:56:46,209 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-16T18:56:46,209 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-16T18:56:46,209 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-16T18:56:46,209 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-16T18:56:46,209 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-16T18:56:46,209 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-16T18:56:46,209 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-16T18:56:46,209 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-16T18:56:46,209 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-16T18:56:46,209 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-16T18:56:46,210 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-16T18:56:46,210 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-16T18:56:46,210 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375406210,5,FailOnTimeoutGroup] 2024-12-16T18:56:46,210 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,210 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375406210,5,FailOnTimeoutGroup] 2024-12-16T18:56:46,210 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,210 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-16T18:56:46,210 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,210 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-16T18:56:46,210 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T18:56:46,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741831_1007 (size=1039) 2024-12-16T18:56:46,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741831_1007 (size=1039) 2024-12-16T18:56:46,219 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-16T18:56:46,219 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a 2024-12-16T18:56:46,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741832_1008 (size=32) 
2024-12-16T18:56:46,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741832_1008 (size=32) 2024-12-16T18:56:46,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:56:46,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-16T18:56:46,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-16T18:56:46,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T18:56:46,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-16T18:56:46,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-16T18:56:46,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-16T18:56:46,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-16T18:56:46,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-16T18:56:46,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,236 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T18:56:46,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740 2024-12-16T18:56:46,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740 2024-12-16T18:56:46,239 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-16T18:56:46,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-16T18:56:46,242 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T18:56:46,243 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821258, jitterRate=0.04428471624851227}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T18:56:46,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-16T18:56:46,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-16T18:56:46,243 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-16T18:56:46,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-16T18:56:46,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-16T18:56:46,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-16T18:56:46,243 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-16T18:56:46,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-16T18:56:46,244 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-16T18:56:46,244 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-16T18:56:46,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-16T18:56:46,245 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-16T18:56:46,246 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-16T18:56:46,310 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a6e4bf70377:42969 2024-12-16T18:56:46,311 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1008): ClusterId : 116de6d9-c58e-492f-b7ec-fe287b3f720a 2024-12-16T18:56:46,311 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-16T18:56:46,329 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-16T18:56:46,329 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-16T18:56:46,337 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-16T18:56:46,337 DEBUG [RS:0;7a6e4bf70377:42969 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f02c3f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T18:56:46,338 DEBUG [RS:0;7a6e4bf70377:42969 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@544431a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0 2024-12-16T18:56:46,338 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-16T18:56:46,338 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-16T18:56:46,338 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-16T18:56:46,338 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,34203,1734375405739 with isa=7a6e4bf70377/172.17.0.2:42969, startcode=1734375405882 2024-12-16T18:56:46,338 DEBUG [RS:0;7a6e4bf70377:42969 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T18:56:46,340 INFO [RS-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52497, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T18:56:46,340 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34203 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,340 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34203 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,342 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a 2024-12-16T18:56:46,342 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:32987 2024-12-16T18:56:46,342 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-16T18:56:46,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:46,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T18:56:46,351 DEBUG [RS:0;7a6e4bf70377:42969 {}] zookeeper.ZKUtil(111): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,351 WARN [RS:0;7a6e4bf70377:42969 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-16T18:56:46,351 INFO [RS:0;7a6e4bf70377:42969 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-16T18:56:46,351 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,351 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,42969,1734375405882] 2024-12-16T18:56:46,355 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-16T18:56:46,355 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-16T18:56:46,357 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-16T18:56:46,357 INFO [RS:0;7a6e4bf70377:42969 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-16T18:56:46,357 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-16T18:56:46,358 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-16T18:56:46,359 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T18:56:46,359 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3 2024-12-16T18:56:46,360 DEBUG [RS:0;7a6e4bf70377:42969 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3 2024-12-16T18:56:46,360 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,360 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,360 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-16T18:56:46,360 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,360 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42969,1734375405882-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T18:56:46,377 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-16T18:56:46,378 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42969,1734375405882-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,390 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.Replication(204): 7a6e4bf70377,42969,1734375405882 started 2024-12-16T18:56:46,390 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,42969,1734375405882, RpcServer on 7a6e4bf70377/172.17.0.2:42969, sessionid=0x100e45677550001 2024-12-16T18:56:46,390 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-16T18:56:46,390 DEBUG [RS:0;7a6e4bf70377:42969 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,390 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,42969,1734375405882' 2024-12-16T18:56:46,390 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-16T18:56:46,391 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-16T18:56:46,391 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-16T18:56:46,391 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-16T18:56:46,391 DEBUG [RS:0;7a6e4bf70377:42969 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,391 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,42969,1734375405882' 2024-12-16T18:56:46,391 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-16T18:56:46,391 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-16T18:56:46,392 DEBUG [RS:0;7a6e4bf70377:42969 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-16T18:56:46,392 INFO [RS:0;7a6e4bf70377:42969 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-16T18:56:46,392 INFO [RS:0;7a6e4bf70377:42969 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-16T18:56:46,396 WARN [7a6e4bf70377:34203 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-16T18:56:46,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:46,497 INFO [RS:0;7a6e4bf70377:42969 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C42969%2C1734375405882, suffix=, logDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882, archiveDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/oldWALs, maxLogs=32 2024-12-16T18:56:46,499 INFO [RS:0;7a6e4bf70377:42969 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42969%2C1734375405882.1734375406499 2024-12-16T18:56:46,506 INFO [RS:0;7a6e4bf70377:42969 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375406499 2024-12-16T18:56:46,507 DEBUG [RS:0;7a6e4bf70377:42969 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38895:38895),(127.0.0.1/127.0.0.1:41819:41819)] 2024-12-16T18:56:46,647 DEBUG [7a6e4bf70377:34203 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-16T18:56:46,648 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,650 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,42969,1734375405882, state=OPENING 2024-12-16T18:56:46,661 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-16T18:56:46,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:46,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:46,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7a6e4bf70377,42969,1734375405882}] 2024-12-16T18:56:46,670 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T18:56:46,670 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T18:56:46,822 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,822 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T18:56:46,824 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T18:56:46,830 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-16T18:56:46,830 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-16T18:56:46,832 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C42969%2C1734375405882.meta, suffix=.meta, logDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882, archiveDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/oldWALs, maxLogs=32 2024-12-16T18:56:46,834 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42969%2C1734375405882.meta.1734375406834.meta 2024-12-16T18:56:46,849 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.meta.1734375406834.meta 2024-12-16T18:56:46,849 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41819:41819),(127.0.0.1/127.0.0.1:38895:38895)] 2024-12-16T18:56:46,849 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-16T18:56:46,850 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-16T18:56:46,850 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-16T18:56:46,850 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-16T18:56:46,850 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-16T18:56:46,850 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:56:46,850 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-16T18:56:46,850 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-16T18:56:46,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-16T18:56:46,852 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-16T18:56:46,852 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,853 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T18:56:46,853 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-16T18:56:46,854 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-16T18:56:46,854 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T18:56:46,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-16T18:56:46,858 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-16T18:56:46,859 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,860 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T18:56:46,861 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740 2024-12-16T18:56:46,862 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740 2024-12-16T18:56:46,864 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T18:56:46,866 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-16T18:56:46,866 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836519, jitterRate=0.06368967890739441}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-16T18:56:46,867 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-16T18:56:46,867 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734375406822 2024-12-16T18:56:46,869 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-16T18:56:46,869 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-16T18:56:46,870 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:46,870 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,42969,1734375405882, state=OPEN 2024-12-16T18:56:46,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T18:56:46,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T18:56:46,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T18:56:46,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T18:56:46,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-16T18:56:46,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7a6e4bf70377,42969,1734375405882 in 239 msec 2024-12-16T18:56:46,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-16T18:56:46,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 667 msec 2024-12-16T18:56:46,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 708 msec 2024-12-16T18:56:46,916 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734375406916, completionTime=-1 2024-12-16T18:56:46,916 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-16T18:56:46,916 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-16T18:56:46,917 DEBUG [hconnection-0x7c84a69-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T18:56:46,918 INFO [RS-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T18:56:46,919 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-16T18:56:46,919 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734375466919 2024-12-16T18:56:46,919 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734375526919 2024-12-16T18:56:46,919 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-16T18:56:46,945 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,34203,1734375405739-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,945 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,34203,1734375405739-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,945 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,34203,1734375405739-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,946 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a6e4bf70377:34203, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,946 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:46,946 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-16T18:56:46,946 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T18:56:46,947 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-16T18:56:46,948 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-16T18:56:46,948 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T18:56:46,948 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:46,949 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T18:56:46,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741835_1011 (size=358) 2024-12-16T18:56:46,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741835_1011 (size=358) 2024-12-16T18:56:46,959 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 58ec97c1298be15af22bf03642bc8ca9, NAME => 'hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a 2024-12-16T18:56:46,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741836_1012 (size=42) 2024-12-16T18:56:46,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741836_1012 (size=42) 2024-12-16T18:56:46,966 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:56:46,966 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 58ec97c1298be15af22bf03642bc8ca9, disabling compactions & flushes 2024-12-16T18:56:46,966 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:56:46,966 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:56:46,966 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. after waiting 0 ms 2024-12-16T18:56:46,966 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:56:46,966 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:56:46,966 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 58ec97c1298be15af22bf03642bc8ca9: 2024-12-16T18:56:46,967 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T18:56:46,968 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734375406967"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375406967"}]},"ts":"1734375406967"} 2024-12-16T18:56:46,969 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-16T18:56:46,970 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T18:56:46,970 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375406970"}]},"ts":"1734375406970"} 2024-12-16T18:56:46,972 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-16T18:56:46,986 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=58ec97c1298be15af22bf03642bc8ca9, ASSIGN}] 2024-12-16T18:56:46,988 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=58ec97c1298be15af22bf03642bc8ca9, ASSIGN 2024-12-16T18:56:46,988 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=58ec97c1298be15af22bf03642bc8ca9, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42969,1734375405882; forceNewPlan=false, retain=false 2024-12-16T18:56:47,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:47,139 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=58ec97c1298be15af22bf03642bc8ca9, regionState=OPENING, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:47,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 58ec97c1298be15af22bf03642bc8ca9, server=7a6e4bf70377,42969,1734375405882}] 2024-12-16T18:56:47,294 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:47,298 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 
2024-12-16T18:56:47,298 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 58ec97c1298be15af22bf03642bc8ca9, NAME => 'hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9.', STARTKEY => '', ENDKEY => ''} 2024-12-16T18:56:47,298 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 58ec97c1298be15af22bf03642bc8ca9 2024-12-16T18:56:47,299 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:56:47,299 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 58ec97c1298be15af22bf03642bc8ca9 2024-12-16T18:56:47,299 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 58ec97c1298be15af22bf03642bc8ca9 2024-12-16T18:56:47,300 INFO [StoreOpener-58ec97c1298be15af22bf03642bc8ca9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 58ec97c1298be15af22bf03642bc8ca9 2024-12-16T18:56:47,302 INFO [StoreOpener-58ec97c1298be15af22bf03642bc8ca9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 58ec97c1298be15af22bf03642bc8ca9 columnFamilyName info 2024-12-16T18:56:47,302 DEBUG [StoreOpener-58ec97c1298be15af22bf03642bc8ca9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:47,303 INFO [StoreOpener-58ec97c1298be15af22bf03642bc8ca9-1 {}] regionserver.HStore(327): Store=58ec97c1298be15af22bf03642bc8ca9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:56:47,304 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9 2024-12-16T18:56:47,304 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9 2024-12-16T18:56:47,307 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 58ec97c1298be15af22bf03642bc8ca9 2024-12-16T18:56:47,310 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T18:56:47,310 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 58ec97c1298be15af22bf03642bc8ca9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817162, jitterRate=0.03907524049282074}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T18:56:47,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 58ec97c1298be15af22bf03642bc8ca9: 2024-12-16T18:56:47,312 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9., pid=6, masterSystemTime=1734375407294 2024-12-16T18:56:47,314 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:56:47,314 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:56:47,315 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=58ec97c1298be15af22bf03642bc8ca9, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:47,315 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34203 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=7a6e4bf70377,42969,1734375405882, table=hbase:namespace, region=58ec97c1298be15af22bf03642bc8ca9. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
2024-12-16T18:56:47,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-16T18:56:47,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 58ec97c1298be15af22bf03642bc8ca9, server=7a6e4bf70377,42969,1734375405882 in 177 msec 2024-12-16T18:56:47,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-16T18:56:47,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=58ec97c1298be15af22bf03642bc8ca9, ASSIGN in 335 msec 2024-12-16T18:56:47,324 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T18:56:47,324 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375407324"}]},"ts":"1734375407324"} 2024-12-16T18:56:47,326 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-16T18:56:47,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:47,379 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-16T18:56:47,380 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T18:56:47,383 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 434 msec 2024-12-16T18:56:47,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-16T18:56:47,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:47,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:56:47,391 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-16T18:56:47,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T18:56:47,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 22 msec 2024-12-16T18:56:47,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:47,423 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-16T18:56:47,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T18:56:47,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 22 msec 2024-12-16T18:56:47,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-16T18:56:47,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-16T18:56:47,486 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.558sec 2024-12-16T18:56:47,486 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-16T18:56:47,486 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-16T18:56:47,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,486 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-16T18:56:47,487 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-16T18:56:47,487 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-16T18:56:47,487 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,34203,1734375405739-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T18:56:47,487 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,34203,1734375405739-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-16T18:56:47,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,488 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-16T18:56:47,488 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 
2024-12-16T18:56:47,488 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,34203,1734375405739-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T18:56:47,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:47,502 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c391f13 to 127.0.0.1:62808 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@308a8075 2024-12-16T18:56:47,512 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@394ac144, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T18:56:47,514 DEBUG [hconnection-0x5200770d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T18:56:47,516 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T18:56:47,521 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7a6e4bf70377,34203,1734375405739 2024-12-16T18:56:47,522 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T18:56:47,524 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-16T18:56:47,525 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T18:56:47,528 INFO [RS-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T18:56:47,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34203 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-16T18:56:47,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34203 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-16T18:56:47,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34203 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-16T18:56:47,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34203 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRolling
2024-12-16T18:56:47,532 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-16T18:56:47,532 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:56:47,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34203 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 9
2024-12-16T18:56:47,533 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-16T18:56:47,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34203 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-16T18:56:47,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741837_1013 (size=381)
2024-12-16T18:56:47,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741837_1013 (size=381)
2024-12-16T18:56:47,542 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fab14af368cb42e95d16937b9023fbb2, NAME => 'TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a
2024-12-16T18:56:47,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741838_1014 (size=64)
2024-12-16T18:56:47,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741838_1014 (size=64)
2024-12-16T18:56:47,549 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:56:47,549 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing fab14af368cb42e95d16937b9023fbb2, disabling compactions & flushes
2024-12-16T18:56:47,549 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.
2024-12-16T18:56:47,549 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.
2024-12-16T18:56:47,549 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. after waiting 0 ms
2024-12-16T18:56:47,549 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.
2024-12-16T18:56:47,549 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.
2024-12-16T18:56:47,549 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for fab14af368cb42e95d16937b9023fbb2:
2024-12-16T18:56:47,550 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-16T18:56:47,551 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1734375407550"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375407550"}]},"ts":"1734375407550"}
2024-12-16T18:56:47,552 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-16T18:56:47,553 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-16T18:56:47,553 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375407553"}]},"ts":"1734375407553"}
2024-12-16T18:56:47,557 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta
2024-12-16T18:56:47,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fab14af368cb42e95d16937b9023fbb2, ASSIGN}]
2024-12-16T18:56:47,580 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fab14af368cb42e95d16937b9023fbb2, ASSIGN
2024-12-16T18:56:47,581 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fab14af368cb42e95d16937b9023fbb2, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42969,1734375405882; forceNewPlan=false, retain=false
2024-12-16T18:56:47,731 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=fab14af368cb42e95d16937b9023fbb2, regionState=OPENING, regionLocation=7a6e4bf70377,42969,1734375405882
2024-12-16T18:56:47,733 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure fab14af368cb42e95d16937b9023fbb2, server=7a6e4bf70377,42969,1734375405882}]
2024-12-16T18:56:47,885 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42969,1734375405882
2024-12-16T18:56:47,890 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.
2024-12-16T18:56:47,890 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => fab14af368cb42e95d16937b9023fbb2, NAME => 'TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.', STARTKEY => '', ENDKEY => ''} 2024-12-16T18:56:47,890 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:56:47,890 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:56:47,890 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:56:47,890 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:56:47,892 INFO [StoreOpener-fab14af368cb42e95d16937b9023fbb2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:56:47,893 INFO [StoreOpener-fab14af368cb42e95d16937b9023fbb2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fab14af368cb42e95d16937b9023fbb2 columnFamilyName info 2024-12-16T18:56:47,893 DEBUG [StoreOpener-fab14af368cb42e95d16937b9023fbb2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:56:47,894 INFO [StoreOpener-fab14af368cb42e95d16937b9023fbb2-1 {}] regionserver.HStore(327): Store=fab14af368cb42e95d16937b9023fbb2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:56:47,895 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:56:47,895 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered 
edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:56:47,897 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:56:47,900 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T18:56:47,900 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened fab14af368cb42e95d16937b9023fbb2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777345, jitterRate=-0.011554911732673645}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T18:56:47,901 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for fab14af368cb42e95d16937b9023fbb2: 2024-12-16T18:56:47,902 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2., pid=11, masterSystemTime=1734375407885 2024-12-16T18:56:47,904 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. 2024-12-16T18:56:47,904 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. 
2024-12-16T18:56:47,905 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=fab14af368cb42e95d16937b9023fbb2, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:56:47,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-16T18:56:47,909 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure fab14af368cb42e95d16937b9023fbb2, server=7a6e4bf70377,42969,1734375405882 in 174 msec 2024-12-16T18:56:47,910 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-16T18:56:47,910 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fab14af368cb42e95d16937b9023fbb2, ASSIGN in 330 msec 2024-12-16T18:56:47,911 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T18:56:47,911 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375407911"}]},"ts":"1734375407911"} 2024-12-16T18:56:47,913 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-16T18:56:47,921 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T18:56:47,923 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRolling in 391 msec 2024-12-16T18:56:47,998 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-16T18:56:47,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,001 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,001 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-16T18:56:48,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:48,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:48,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:49,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:49,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:49,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:50,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:56:50,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:50,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:51,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:51,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:51,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:52,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:52,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:52,356 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-16T18:56:52,356 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-16T18:56:52,356 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-12-16T18:56:52,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:52,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-16T18:56:52,531 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-12-16T18:56:52,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-16T18:56:52,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-16T18:56:53,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:53,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:53,392 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-16T18:56:53,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,394 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:56:53,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:54,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:54,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:54,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:55,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:55,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:55,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:56,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:56,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:56,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:57,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:57,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:57,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:57,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34203 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-16T18:56:57,535 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling, procId: 9 completed
2024-12-16T18:56:57,538 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRolling
2024-12-16T18:56:57,538 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.
2024-12-16T18:56:57,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on fab14af368cb42e95d16937b9023fbb2
2024-12-16T18:56:57,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fab14af368cb42e95d16937b9023fbb2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:56:57,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/14be8dcaf4184505ac7f672ef219697c is 1080, key is row0001/info:/1734375417543/Put/seqid=0
2024-12-16T18:56:57,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741839_1015 (size=12509)
2024-12-16T18:56:57,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741839_1015 (size=12509)
2024-12-16T18:56:57,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fab14af368cb42e95d16937b9023fbb2, server=7a6e4bf70377,42969,1734375405882
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T18:56:57,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35578 deadline: 1734375427635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fab14af368cb42e95d16937b9023fbb2, server=7a6e4bf70377,42969,1734375405882
2024-12-16T18:56:58,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/14be8dcaf4184505ac7f672ef219697c
2024-12-16T18:56:58,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/14be8dcaf4184505ac7f672ef219697c as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/14be8dcaf4184505ac7f672ef219697c
2024-12-16T18:56:58,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/14be8dcaf4184505ac7f672ef219697c, entries=7, sequenceid=11, filesize=12.2 K
2024-12-16T18:56:58,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for fab14af368cb42e95d16937b9023fbb2 in 467ms, sequenceid=11, compaction requested=false
2024-12-16T18:56:58,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fab14af368cb42e95d16937b9023fbb2:
2024-12-16T18:56:58,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:58,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:58,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:56:59,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:59,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:56:59,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:00,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:00,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:00,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:01,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:01,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:01,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:02,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:02,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:02,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:03,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:03,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:03,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:04,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:04,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:04,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:05,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:05,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:05,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:06,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:06,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:06,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:07,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:07,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:07,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
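What the repeating trace shows: RecoverLeaseFSUtils.isFileClosed (RecoverLeaseFSUtils.java:254) calls DistributedFileSystem.isFileClosed reflectively via Method.invoke, and because the test's DFSClient has already been shut down, DFSClient.checkOpen throws IOException("Filesystem closed") on every attempt. Reflection wraps that checked exception in an InvocationTargetException whose own message is null, which is consistent with the logged "java.lang.reflect.InvocationTargetException: null", and the Close-WAL-Writer-0 thread keeps retrying about once per second per WAL file. A minimal, self-contained Java sketch of the wrapping behavior follows; ClosedFs and ReflectiveRetryDemo are hypothetical stand-ins for illustration, not HBase or HDFS classes.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    // Hypothetical stand-in that fails the same way a closed DFSClient does.
    class ClosedFs {
        private volatile boolean open = false; // the client has already been shut down

        // Shaped like DistributedFileSystem.isFileClosed, but purely illustrative.
        public boolean isFileClosed(String path) throws IOException {
            if (!open) {
                throw new IOException("Filesystem closed"); // mirrors DFSClient.checkOpen
            }
            return true;
        }
    }

    public class ReflectiveRetryDemo {
        public static void main(String[] args) throws Exception {
            ClosedFs fs = new ClosedFs();
            Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(fs, "/WALs/example.meta"); // reflective call, as in RecoverLeaseFSUtils
            } catch (InvocationTargetException e) {
                // Reflection wraps the checked IOException; the wrapper's own message
                // is null, matching the "InvocationTargetException: null" log lines.
                System.out.println(e + " caused by: " + e.getCause());
            }
        }
    }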
2024-12-16T18:57:07,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on fab14af368cb42e95d16937b9023fbb2
2024-12-16T18:57:07,721 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fab14af368cb42e95d16937b9023fbb2 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-16T18:57:07,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/eb29b5f0b684498192c8f084b738281d is 1080, key is row0008/info:/1734375417562/Put/seqid=0
2024-12-16T18:57:07,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741840_1016 (size=29761)
2024-12-16T18:57:07,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741840_1016 (size=29761)
2024-12-16T18:57:07,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/eb29b5f0b684498192c8f084b738281d
2024-12-16T18:57:07,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/eb29b5f0b684498192c8f084b738281d as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d
2024-12-16T18:57:07,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d, entries=23, sequenceid=37, filesize=29.1 K
2024-12-16T18:57:07,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for fab14af368cb42e95d16937b9023fbb2 in 27ms, sequenceid=37, compaction requested=false
2024-12-16T18:57:07,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fab14af368cb42e95d16937b9023fbb2:
2024-12-16T18:57:07,748 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=41.3 K, sizeToCheck=16.0 K
2024-12-16T18:57:07,748 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:57:07,748 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d because midkey is the same as first or last row
2024-12-16T18:57:08,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:08,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:08,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
[each warning above is followed by the same InvocationTargetException / "Filesystem closed" stack trace shown earlier; the last trace is cut off at the end of the capture]
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:08,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 after 196177ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T18:57:09,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:09,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:09,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:09,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on fab14af368cb42e95d16937b9023fbb2
2024-12-16T18:57:09,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fab14af368cb42e95d16937b9023fbb2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:57:09,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/62347cad4c544fed85fe838c515f26f9 is 1080, key is row0031/info:/1734375427723/Put/seqid=0
2024-12-16T18:57:09,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741841_1017 (size=12509)
2024-12-16T18:57:09,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741841_1017 (size=12509)
2024-12-16T18:57:09,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/62347cad4c544fed85fe838c515f26f9
2024-12-16T18:57:09,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/62347cad4c544fed85fe838c515f26f9 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/62347cad4c544fed85fe838c515f26f9
2024-12-16T18:57:09,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/62347cad4c544fed85fe838c515f26f9, entries=7, sequenceid=47, filesize=12.2 K
2024-12-16T18:57:09,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for fab14af368cb42e95d16937b9023fbb2 in 25ms, sequenceid=47, compaction requested=true
2024-12-16T18:57:09,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fab14af368cb42e95d16937b9023fbb2:
2024-12-16T18:57:09,758 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=53.5 K, sizeToCheck=16.0 K
2024-12-16T18:57:09,759 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:57:09,759 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d because midkey is the same as first or last row
2024-12-16T18:57:09,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on fab14af368cb42e95d16937b9023fbb2
2024-12-16T18:57:09,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fab14af368cb42e95d16937b9023fbb2:info, priority=-2147483648, current under compaction store size is 1
2024-12-16T18:57:09,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:09,759 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:57:09,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fab14af368cb42e95d16937b9023fbb2 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB
2024-12-16T18:57:09,761 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:57:09,761 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): fab14af368cb42e95d16937b9023fbb2/info is initiating minor compaction (all files)
2024-12-16T18:57:09,761 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fab14af368cb42e95d16937b9023fbb2/info in TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.
2024-12-16T18:57:09,761 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/14be8dcaf4184505ac7f672ef219697c, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/62347cad4c544fed85fe838c515f26f9] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp, totalSize=53.5 K
2024-12-16T18:57:09,761 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14be8dcaf4184505ac7f672ef219697c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1734375417543
2024-12-16T18:57:09,762 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb29b5f0b684498192c8f084b738281d, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734375417562
2024-12-16T18:57:09,762 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62347cad4c544fed85fe838c515f26f9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1734375427723
2024-12-16T18:57:09,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/341e744a609b46c2a1273984a463c192 is 1080, key is row0038/info:/1734375429733/Put/seqid=0
2024-12-16T18:57:09,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741842_1018 (size=25453)
2024-12-16T18:57:09,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741842_1018 (size=25453)
2024-12-16T18:57:09,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=69 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/341e744a609b46c2a1273984a463c192
2024-12-16T18:57:09,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/341e744a609b46c2a1273984a463c192 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/341e744a609b46c2a1273984a463c192
2024-12-16T18:57:09,784 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fab14af368cb42e95d16937b9023fbb2#info#compaction#45 average throughput is 9.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:57:09,785 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/51e696a99b1441f997d54f277e116a87 is 1080, key is row0001/info:/1734375417543/Put/seqid=0
2024-12-16T18:57:09,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/341e744a609b46c2a1273984a463c192, entries=19, sequenceid=69, filesize=24.9 K
2024-12-16T18:57:09,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=8.41 KB/8608 for fab14af368cb42e95d16937b9023fbb2 in 30ms, sequenceid=69, compaction requested=false
2024-12-16T18:57:09,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fab14af368cb42e95d16937b9023fbb2:
2024-12-16T18:57:09,789 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=78.4 K, sizeToCheck=16.0 K
2024-12-16T18:57:09,789 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:57:09,789 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d because midkey is the same as first or last row
2024-12-16T18:57:09,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741843_1019 (size=44978)
2024-12-16T18:57:09,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741843_1019 (size=44978)
2024-12-16T18:57:09,798 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/51e696a99b1441f997d54f277e116a87 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/51e696a99b1441f997d54f277e116a87
2024-12-16T18:57:09,804 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fab14af368cb42e95d16937b9023fbb2/info of fab14af368cb42e95d16937b9023fbb2 into 51e696a99b1441f997d54f277e116a87(size=43.9 K), total size for store is 68.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:57:09,804 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fab14af368cb42e95d16937b9023fbb2:
2024-12-16T18:57:09,804 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2., storeName=fab14af368cb42e95d16937b9023fbb2/info, priority=13, startTime=1734375429759; duration=0sec
2024-12-16T18:57:09,804 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=68.8 K, sizeToCheck=16.0 K
2024-12-16T18:57:09,804 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:57:09,804 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/51e696a99b1441f997d54f277e116a87 because midkey is the same as first or last row
2024-12-16T18:57:09,804 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:09,804 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fab14af368cb42e95d16937b9023fbb2:info
2024-12-16T18:57:10,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:10,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:10,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:11,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:11,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:11,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on fab14af368cb42e95d16937b9023fbb2
2024-12-16T18:57:11,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fab14af368cb42e95d16937b9023fbb2 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-16T18:57:11,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/4103f14dfefe43f098b0cee45e74e2d4 is 1080, key is row0057/info:/1734375429759/Put/seqid=0
2024-12-16T18:57:11,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741844_1020 (size=14663)
2024-12-16T18:57:11,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741844_1020 (size=14663)
2024-12-16T18:57:11,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/4103f14dfefe43f098b0cee45e74e2d4
2024-12-16T18:57:11,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/4103f14dfefe43f098b0cee45e74e2d4 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4103f14dfefe43f098b0cee45e74e2d4
2024-12-16T18:57:11,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4103f14dfefe43f098b0cee45e74e2d4, entries=9, sequenceid=82, filesize=14.3 K
2024-12-16T18:57:11,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=17.86 KB/18292 for fab14af368cb42e95d16937b9023fbb2 in 30ms, sequenceid=82, compaction requested=true
2024-12-16T18:57:11,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fab14af368cb42e95d16937b9023fbb2:
2024-12-16T18:57:11,803 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=83.1 K, sizeToCheck=16.0 K
2024-12-16T18:57:11,803 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-16T18:57:11,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on fab14af368cb42e95d16937b9023fbb2
2024-12-16T18:57:11,804 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/51e696a99b1441f997d54f277e116a87 because midkey is the same as first or last row
2024-12-16T18:57:11,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fab14af368cb42e95d16937b9023fbb2:info, priority=-2147483648, current under compaction store size is 1
2024-12-16T18:57:11,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:11,804 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:57:11,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fab14af368cb42e95d16937b9023fbb2 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-16T18:57:11,805 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:57:11,805 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): fab14af368cb42e95d16937b9023fbb2/info is initiating minor compaction (all files)
2024-12-16T18:57:11,805 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fab14af368cb42e95d16937b9023fbb2/info in TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.
2024-12-16T18:57:11,805 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/51e696a99b1441f997d54f277e116a87, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/341e744a609b46c2a1273984a463c192, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4103f14dfefe43f098b0cee45e74e2d4] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp, totalSize=83.1 K 2024-12-16T18:57:11,806 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51e696a99b1441f997d54f277e116a87, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1734375417543 2024-12-16T18:57:11,806 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 341e744a609b46c2a1273984a463c192, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=69, earliestPutTs=1734375429733 2024-12-16T18:57:11,807 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4103f14dfefe43f098b0cee45e74e2d4, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1734375429759 2024-12-16T18:57:11,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/f262ce3eadeb458d93b424eb7d375bd8 is 1080, key is row0066/info:/1734375431774/Put/seqid=0 2024-12-16T18:57:11,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741845_1021 (size=24376) 2024-12-16T18:57:11,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741845_1021 (size=24376) 2024-12-16T18:57:11,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/f262ce3eadeb458d93b424eb7d375bd8 2024-12-16T18:57:11,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fab14af368cb42e95d16937b9023fbb2, server=7a6e4bf70377,42969,1734375405882 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T18:57:11,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35578 deadline: 1734375441823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=fab14af368cb42e95d16937b9023fbb2, server=7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:11,825 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fab14af368cb42e95d16937b9023fbb2#info#compaction#48 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T18:57:11,826 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/d1496b1dc1d345a087f65afc77b7a8f8 is 1080, key is row0001/info:/1734375417543/Put/seqid=0 2024-12-16T18:57:11,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/f262ce3eadeb458d93b424eb7d375bd8 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/f262ce3eadeb458d93b424eb7d375bd8 2024-12-16T18:57:11,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741846_1022 (size=75378) 2024-12-16T18:57:11,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741846_1022 (size=75378) 2024-12-16T18:57:11,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/f262ce3eadeb458d93b424eb7d375bd8, entries=18, sequenceid=103, filesize=23.8 K 2024-12-16T18:57:11,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for fab14af368cb42e95d16937b9023fbb2 in 31ms, sequenceid=103, compaction requested=false 2024-12-16T18:57:11,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fab14af368cb42e95d16937b9023fbb2: 2024-12-16T18:57:11,835 DEBUG [MemStoreFlusher.0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=106.9 K, sizeToCheck=16.0 K 2024-12-16T18:57:11,835 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-16T18:57:11,835 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/51e696a99b1441f997d54f277e116a87 because midkey is the same as first or last row 2024-12-16T18:57:11,838 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/d1496b1dc1d345a087f65afc77b7a8f8 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8 2024-12-16T18:57:11,845 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fab14af368cb42e95d16937b9023fbb2/info of fab14af368cb42e95d16937b9023fbb2 into d1496b1dc1d345a087f65afc77b7a8f8(size=73.6 K), total size for store is 97.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-16T18:57:11,845 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fab14af368cb42e95d16937b9023fbb2: 2024-12-16T18:57:11,845 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2., storeName=fab14af368cb42e95d16937b9023fbb2/info, priority=13, startTime=1734375431804; duration=0sec 2024-12-16T18:57:11,846 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=97.4 K, sizeToCheck=16.0 K 2024-12-16T18:57:11,846 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-16T18:57:11,847 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T18:57:11,847 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T18:57:11,847 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fab14af368cb42e95d16937b9023fbb2:info 2024-12-16T18:57:11,848 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34203 {}] assignment.AssignmentManager(1346): Split request from 7a6e4bf70377,42969,1734375405882, parent={ENCODED => fab14af368cb42e95d16937b9023fbb2, NAME => 'TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-16T18:57:11,853 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34203 {}] assignment.SplitTableRegionProcedure(223): Splittable=true 
state=OPEN, location=7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:11,858 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34203 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=fab14af368cb42e95d16937b9023fbb2, daughterA=6c92de9f8fe3ea13408be58d6f2c0c24, daughterB=73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:11,859 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=fab14af368cb42e95d16937b9023fbb2, daughterA=6c92de9f8fe3ea13408be58d6f2c0c24, daughterB=73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:11,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fab14af368cb42e95d16937b9023fbb2, UNASSIGN}] 2024-12-16T18:57:11,869 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fab14af368cb42e95d16937b9023fbb2, UNASSIGN 2024-12-16T18:57:11,869 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=fab14af368cb42e95d16937b9023fbb2, regionState=CLOSING, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:11,871 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-16T18:57:11,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE; CloseRegionProcedure fab14af368cb42e95d16937b9023fbb2, server=7a6e4bf70377,42969,1734375405882}] 2024-12-16T18:57:12,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:12,028 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(124): Close fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,029 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-16T18:57:12,029 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1681): Closing fab14af368cb42e95d16937b9023fbb2, disabling compactions & flushes 2024-12-16T18:57:12,029 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1703):
Closing region TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. 2024-12-16T18:57:12,029 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. 2024-12-16T18:57:12,029 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. after waiting 0 ms 2024-12-16T18:57:12,029 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. 2024-12-16T18:57:12,029 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(2837): Flushing fab14af368cb42e95d16937b9023fbb2 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-16T18:57:12,034 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/4011e518faa141118ae139ee5773ec90 is 1080, key is row0084/info:/1734375431804/Put/seqid=0 2024-12-16T18:57:12,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741847_1023 (size=16817) 2024-12-16T18:57:12,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741847_1023 (size=16817) 2024-12-16T18:57:12,039 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/4011e518faa141118ae139ee5773ec90 2024-12-16T18:57:12,045 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/.tmp/info/4011e518faa141118ae139ee5773ec90 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4011e518faa141118ae139ee5773ec90 2024-12-16T18:57:12,050 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4011e518faa141118ae139ee5773ec90, entries=11, sequenceid=118, filesize=16.4 K 2024-12-16T18:57:12,052 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(3040): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for 
fab14af368cb42e95d16937b9023fbb2 in 23ms, sequenceid=118, compaction requested=true 2024-12-16T18:57:12,053 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/14be8dcaf4184505ac7f672ef219697c, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/51e696a99b1441f997d54f277e116a87, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/62347cad4c544fed85fe838c515f26f9, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/341e744a609b46c2a1273984a463c192, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4103f14dfefe43f098b0cee45e74e2d4] to archive 2024-12-16T18:57:12,054 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T18:57:12,057 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/51e696a99b1441f997d54f277e116a87 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/51e696a99b1441f997d54f277e116a87 2024-12-16T18:57:12,057 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/eb29b5f0b684498192c8f084b738281d 2024-12-16T18:57:12,057 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/62347cad4c544fed85fe838c515f26f9 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/62347cad4c544fed85fe838c515f26f9 2024-12-16T18:57:12,057 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/14be8dcaf4184505ac7f672ef219697c to 
hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/14be8dcaf4184505ac7f672ef219697c 2024-12-16T18:57:12,059 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4103f14dfefe43f098b0cee45e74e2d4 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4103f14dfefe43f098b0cee45e74e2d4 2024-12-16T18:57:12,059 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/341e744a609b46c2a1273984a463c192 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/341e744a609b46c2a1273984a463c192 2024-12-16T18:57:12,065 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=1 2024-12-16T18:57:12,066 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. 
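
The RegionTooBusyException earlier in this log is the write-path backpressure gate: once a region's memstore grows past its blocking size, the region server rejects mutations until a flush catches up. A minimal sketch of that check, assuming the usual HBase sizing rule (blocking size = hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; with the 32.0 K limit logged here and the default multiplier of 4, an 8 K flush size is implied, which is an inference, not something the log states):

    import org.apache.hadoop.hbase.RegionTooBusyException;

    // Illustrative only -- not the actual HRegion code.
    final class MemStoreGate {
      static void checkResources(long memstoreDataSize, long blockingMemStoreSize)
          throws RegionTooBusyException {
        if (memstoreDataSize > blockingMemStoreSize) {
          // The real server also requests a flush before rejecting the call,
          // which is why flush activity follows the exception in the log above.
          throw new RegionTooBusyException(
              "Over memstore limit=" + blockingMemStoreSize);
        }
      }
    }
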
2024-12-16T18:57:12,066 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1635): Region close journal for fab14af368cb42e95d16937b9023fbb2: 2024-12-16T18:57:12,068 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(170): Closed fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,068 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=fab14af368cb42e95d16937b9023fbb2, regionState=CLOSED 2024-12-16T18:57:12,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=13 2024-12-16T18:57:12,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=13, state=SUCCESS; CloseRegionProcedure fab14af368cb42e95d16937b9023fbb2, server=7a6e4bf70377,42969,1734375405882 in 197 msec 2024-12-16T18:57:12,073 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-16T18:57:12,073 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=fab14af368cb42e95d16937b9023fbb2, UNASSIGN in 204 msec 2024-12-16T18:57:12,083 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:57:12,084 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=12 splitting 3 storefiles, region=fab14af368cb42e95d16937b9023fbb2, threads=3 2024-12-16T18:57:12,085 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4011e518faa141118ae139ee5773ec90 for region: fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,085 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8 for region: fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,085 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/f262ce3eadeb458d93b424eb7d375bd8 for region: fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:12,095 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/f262ce3eadeb458d93b424eb7d375bd8, top=true 2024-12-16T18:57:12,101 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4011e518faa141118ae139ee5773ec90, top=true 2024-12-16T18:57:12,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741848_1024 (size=27) 2024-12-16T18:57:12,108 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-f262ce3eadeb458d93b424eb7d375bd8 for child: 73b443c21ee806be6a66ecec93f40711, parent: fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,108 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/f262ce3eadeb458d93b424eb7d375bd8 for region: fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741848_1024 (size=27) 2024-12-16T18:57:12,114 INFO [StoreFileSplitter-pool-0 {}] 
regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-4011e518faa141118ae139ee5773ec90 for child: 73b443c21ee806be6a66ecec93f40711, parent: fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,114 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/4011e518faa141118ae139ee5773ec90 for region: fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741849_1025 (size=27) 2024-12-16T18:57:12,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741849_1025 (size=27) 2024-12-16T18:57:12,126 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8 for region: fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:57:12,126 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=12 split storefiles for region fab14af368cb42e95d16937b9023fbb2 Daughter A: [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2] storefiles, Daughter B: [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-4011e518faa141118ae139ee5773ec90, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-f262ce3eadeb458d93b424eb7d375bd8] storefiles. 
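
Note how the split itself copies no data: daughter A receives a "<hfile>.<parentEncodedRegion>" reference to the bottom half of the parent's compacted file, while daughter B receives a top-half reference plus HFileLinks to the two parent files that lie entirely above the split key. A minimal sketch of the two naming schemes, inferred from the paths printed above rather than from the HFileLink/StoreFileInfo sources:

    // Illustrative reconstruction of the file names seen in this log.
    final class SplitFileNames {
      // Reference file: a daughter reads half of a parent hfile in place.
      static String referenceName(String hfile, String parentEncodedRegion) {
        return hfile + "." + parentEncodedRegion;   // e.g. d1496...a8f8.fab14...fbb2
      }

      // HFileLink: a daughter points at a whole parent hfile.
      static String hfileLinkName(String table, String parentEncodedRegion, String hfile) {
        return table + "=" + parentEncodedRegion + "-" + hfile;
        // e.g. TestLogRolling-testLogRolling=fab14...fbb2-f262...5bd8
      }
    }
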
2024-12-16T18:57:12,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741850_1026 (size=71) 2024-12-16T18:57:12,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741850_1026 (size=71) 2024-12-16T18:57:12,140 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:57:12,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741851_1027 (size=71) 2024-12-16T18:57:12,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741851_1027 (size=71) 2024-12-16T18:57:12,162 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:57:12,173 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-12-16T18:57:12,175 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-12-16T18:57:12,177 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1734375432176"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1734375432176"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1734375432176"}]},"ts":"1734375432176"} 2024-12-16T18:57:12,177 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1734375432176"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375432176"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1734375432176"}]},"ts":"1734375432176"} 2024-12-16T18:57:12,177 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1734375432176"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375432176"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1734375432176"}]},"ts":"1734375432176"} 2024-12-16T18:57:12,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42969 {}] regionserver.HRegion(8581): Flush requested on 1588230740 2024-12-16T18:57:12,206 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
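
The three MetaTableAccessor puts above are the split's bookkeeping in hbase:meta: the parent row gains splitA/splitB qualifiers pointing at the daughters, and each daughter row gets its own regioninfo, state and seqnumDuringOpen. A hedged sketch of the parent-row update using the public client API (row key and qualifiers are taken from the log; the code and placeholder byte arrays are illustrative, not the MetaTableAccessor implementation):

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MetaSplitRow {
      static Put parentSplitRow() {
        byte[] info = Bytes.toBytes("info");
        byte[] parentInfo = new byte[63];    // stand-in for the protobuf RegionInfo (vlen=63 above)
        byte[] daughterAInfo = new byte[70]; // stand-in (vlen=70 above)
        byte[] daughterBInfo = new byte[70]; // stand-in (vlen=70 above)
        Put p = new Put(Bytes.toBytes(
            "TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2."));
        p.addColumn(info, Bytes.toBytes("regioninfo"), parentInfo);
        p.addColumn(info, Bytes.toBytes("splitA"), daughterAInfo);
        p.addColumn(info, Bytes.toBytes("splitB"), daughterBInfo);
        return p;
      }
    }
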
2024-12-16T18:57:12,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=4.75 KB heapSize=8.29 KB 2024-12-16T18:57:12,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6c92de9f8fe3ea13408be58d6f2c0c24, ASSIGN}, {pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73b443c21ee806be6a66ecec93f40711, ASSIGN}] 2024-12-16T18:57:12,212 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73b443c21ee806be6a66ecec93f40711, ASSIGN 2024-12-16T18:57:12,212 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6c92de9f8fe3ea13408be58d6f2c0c24, ASSIGN 2024-12-16T18:57:12,213 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73b443c21ee806be6a66ecec93f40711, ASSIGN; state=SPLITTING_NEW, location=7a6e4bf70377,42969,1734375405882; forceNewPlan=false, retain=false 2024-12-16T18:57:12,222 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6c92de9f8fe3ea13408be58d6f2c0c24, ASSIGN; state=SPLITTING_NEW, location=7a6e4bf70377,42969,1734375405882; forceNewPlan=false, retain=false 2024-12-16T18:57:12,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/info/d02942a0058c47689b317774ea9722ed is 193, key is TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711./info:regioninfo/1734375432176/Put/seqid=0 2024-12-16T18:57:12,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741852_1028 (size=9423) 2024-12-16T18:57:12,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741852_1028 (size=9423) 2024-12-16T18:57:12,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/info/d02942a0058c47689b317774ea9722ed 2024-12-16T18:57:12,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/table/8de4421035164a43b7146d4db2a99f97 is 65, key is TestLogRolling-testLogRolling/table:state/1734375407911/Put/seqid=0 2024-12-16T18:57:12,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34265 is added to blk_1073741853_1029 (size=5412) 2024-12-16T18:57:12,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741853_1029 (size=5412) 2024-12-16T18:57:12,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=216 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/table/8de4421035164a43b7146d4db2a99f97 2024-12-16T18:57:12,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/info/d02942a0058c47689b317774ea9722ed as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/info/d02942a0058c47689b317774ea9722ed 2024-12-16T18:57:12,279 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/info/d02942a0058c47689b317774ea9722ed, entries=29, sequenceid=17, filesize=9.2 K 2024-12-16T18:57:12,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/table/8de4421035164a43b7146d4db2a99f97 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/table/8de4421035164a43b7146d4db2a99f97 2024-12-16T18:57:12,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/table/8de4421035164a43b7146d4db2a99f97, entries=4, sequenceid=17, filesize=5.3 K 2024-12-16T18:57:12,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.75 KB/4869, heapSize ~8.01 KB/8200, currentSize=0 B/0 for 1588230740 in 81ms, sequenceid=17, compaction requested=false 2024-12-16T18:57:12,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-16T18:57:12,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:12,363 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=73b443c21ee806be6a66ecec93f40711, regionState=OPENING, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:12,363 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=6c92de9f8fe3ea13408be58d6f2c0c24, regionState=OPENING, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:12,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; OpenRegionProcedure 73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882}] 2024-12-16T18:57:12,366 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=15, state=RUNNABLE; OpenRegionProcedure 6c92de9f8fe3ea13408be58d6f2c0c24, server=7a6e4bf70377,42969,1734375405882}] 2024-12-16T18:57:12,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:12,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:12,523 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:57:12,523 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7285): Opening region: {ENCODED => 6c92de9f8fe3ea13408be58d6f2c0c24, NAME => 'TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-16T18:57:12,524 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6c92de9f8fe3ea13408be58d6f2c0c24 2024-12-16T18:57:12,524 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:57:12,524 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7327): checking encryption for 6c92de9f8fe3ea13408be58d6f2c0c24 2024-12-16T18:57:12,524 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7330): checking classloading for 6c92de9f8fe3ea13408be58d6f2c0c24 2024-12-16T18:57:12,527 INFO [StoreOpener-6c92de9f8fe3ea13408be58d6f2c0c24-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6c92de9f8fe3ea13408be58d6f2c0c24 2024-12-16T18:57:12,529 INFO [StoreOpener-6c92de9f8fe3ea13408be58d6f2c0c24-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6c92de9f8fe3ea13408be58d6f2c0c24 columnFamilyName info 2024-12-16T18:57:12,529 DEBUG [StoreOpener-6c92de9f8fe3ea13408be58d6f2c0c24-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:57:12,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-16T18:57:12,543 DEBUG [StoreOpener-6c92de9f8fe3ea13408be58d6f2c0c24-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2->hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8-bottom 2024-12-16T18:57:12,543 INFO [StoreOpener-6c92de9f8fe3ea13408be58d6f2c0c24-1 {}] regionserver.HStore(327): Store=6c92de9f8fe3ea13408be58d6f2c0c24/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:57:12,544 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24 2024-12-16T18:57:12,545 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24 2024-12-16T18:57:12,548 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1085): writing seq id for 6c92de9f8fe3ea13408be58d6f2c0c24 2024-12-16T18:57:12,549 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1102): Opened 6c92de9f8fe3ea13408be58d6f2c0c24; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808447, jitterRate=0.027994126081466675}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T18:57:12,550 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1001): Region open journal for 6c92de9f8fe3ea13408be58d6f2c0c24: 2024-12-16T18:57:12,551 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24., pid=18, masterSystemTime=1734375432517 2024-12-16T18:57:12,551 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(403): Add compact mark for store 6c92de9f8fe3ea13408be58d6f2c0c24:info, priority=-2147483648, current under compaction store size is 1 2024-12-16T18:57:12,551 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-16T18:57:12,551 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T18:57:12,552 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:57:12,552 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): 6c92de9f8fe3ea13408be58d6f2c0c24/info is initiating minor compaction (all files) 2024-12-16T18:57:12,552 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c92de9f8fe3ea13408be58d6f2c0c24/info in TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:57:12,552 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2->hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8-bottom] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/.tmp, totalSize=73.6 K 2024-12-16T18:57:12,553 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1734375417543 2024-12-16T18:57:12,553 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:57:12,553 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:57:12,553 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. 
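
The sizes driving the split decisions in this log fit the standard policy arithmetic: IncreasingToUpperBoundRegionSplitPolicy checks store size against min(regionCount^3 x initialSize, desiredMaxFileSize), and ConstantSizeRegionSplitPolicy jitters the configured max file size, which is why each daughter prints a different desiredMaxFileSize at open time. A worked sketch (the 786432-byte base is inferred from the two jitterRate/desiredMaxFileSize pairs in this log; it is not printed anywhere):

    final class SplitMath {
      static void show() {
        long base = 786432L; // assumed hbase.hregion.max.filesize for this test run
        long daughterA = base + (long) (base *  0.027994126081466675); // = 808447, as logged
        long daughterB = base + (long) (base * -0.08845089375972748);  // = 716872, as logged

        long initialSize = 16384L; // printed in the policy toString above
        long regions = 1L;         // regionsWithCommonTable=1 above
        long sizeToCheck = Math.min(regions * regions * regions * initialSize, daughterA);
        // 1^3 * 16384 = 16384 bytes, i.e. the "sizeToCheck=16.0 K" in the split decisions
        System.out.println(daughterA + " " + daughterB + " " + sizeToCheck);
      }
    }
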
2024-12-16T18:57:12,553 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7285): Opening region: {ENCODED => 73b443c21ee806be6a66ecec93f40711, NAME => 'TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-16T18:57:12,554 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=6c92de9f8fe3ea13408be58d6f2c0c24, regionState=OPEN, openSeqNum=122, regionLocation=7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:12,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:12,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T18:57:12,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7327): checking encryption for 73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:12,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7330): checking classloading for 73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:12,555 INFO [StoreOpener-73b443c21ee806be6a66ecec93f40711-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:12,556 INFO [StoreOpener-73b443c21ee806be6a66ecec93f40711-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73b443c21ee806be6a66ecec93f40711 columnFamilyName info 2024-12-16T18:57:12,556 DEBUG [StoreOpener-73b443c21ee806be6a66ecec93f40711-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:57:12,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=15 2024-12-16T18:57:12,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=15, state=SUCCESS; OpenRegionProcedure 6c92de9f8fe3ea13408be58d6f2c0c24, server=7a6e4bf70377,42969,1734375405882 in 189 msec 2024-12-16T18:57:12,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=12, state=SUCCESS; TransitRegionStateProcedure 
table=TestLogRolling-testLogRolling, region=6c92de9f8fe3ea13408be58d6f2c0c24, ASSIGN in 346 msec 2024-12-16T18:57:12,565 DEBUG [StoreOpener-73b443c21ee806be6a66ecec93f40711-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-4011e518faa141118ae139ee5773ec90 2024-12-16T18:57:12,570 DEBUG [StoreOpener-73b443c21ee806be6a66ecec93f40711-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-f262ce3eadeb458d93b424eb7d375bd8 2024-12-16T18:57:12,576 DEBUG [StoreOpener-73b443c21ee806be6a66ecec93f40711-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2->hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8-top 2024-12-16T18:57:12,576 INFO [StoreOpener-73b443c21ee806be6a66ecec93f40711-1 {}] regionserver.HStore(327): Store=73b443c21ee806be6a66ecec93f40711/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T18:57:12,576 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c92de9f8fe3ea13408be58d6f2c0c24#info#compaction#52 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-16T18:57:12,577 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/.tmp/info/e279d5958ac049a0afe07f1be507e5a5 is 1080, key is row0001/info:/1734375417543/Put/seqid=0 2024-12-16T18:57:12,577 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:12,578 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:12,580 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1085): writing seq id for 73b443c21ee806be6a66ecec93f40711 2024-12-16T18:57:12,581 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1102): Opened 73b443c21ee806be6a66ecec93f40711; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716872, jitterRate=-0.08845089375972748}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T18:57:12,581 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1001): Region open journal for 73b443c21ee806be6a66ecec93f40711: 2024-12-16T18:57:12,582 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., pid=17, masterSystemTime=1734375432517 2024-12-16T18:57:12,582 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T18:57:12,582 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(403): Add compact mark for store 73b443c21ee806be6a66ecec93f40711:info, priority=-2147483648, current under compaction store size is 2 2024-12-16T18:57:12,582 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T18:57:12,583 INFO [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. 
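
The PressureAwareThroughputController lines ("average throughput is ... total limit is 50.00 MB/second") come from compaction throttling: the controller meters bytes as a compaction writes and sleeps the writer whenever it runs ahead of the allowed rate, which is what the "slept N time(s)" accounting reports. A minimal sketch of the idea with a fixed limit; the real controller also scales the limit with flush and compaction pressure, and this class is illustrative, not its API:

    // Illustrative fixed-rate limiter in the spirit of the controller above.
    final class SimpleThroughputLimiter {
      private final double bytesPerMs;
      private final long start = System.currentTimeMillis();
      private long written;

      SimpleThroughputLimiter(double bytesPerSecond) {
        this.bytesPerMs = bytesPerSecond / 1000.0; // 50 MB/s ~= 52428.8 bytes/ms
      }

      void control(long deltaBytes) throws InterruptedException {
        written += deltaBytes;
        long earliestAllowed = start + (long) (written / bytesPerMs);
        long now = System.currentTimeMillis();
        if (now < earliestAllowed) {
          Thread.sleep(earliestAllowed - now); // counted as "slept 1 time(s)" in the log
        }
      }
    }
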
2024-12-16T18:57:12,583 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.HStore(1540): 73b443c21ee806be6a66ecec93f40711/info is initiating minor compaction (all files) 2024-12-16T18:57:12,583 INFO [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73b443c21ee806be6a66ecec93f40711/info in TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. 2024-12-16T18:57:12,584 INFO [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2->hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8-top, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-f262ce3eadeb458d93b424eb7d375bd8, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-4011e518faa141118ae139ee5773ec90] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp, totalSize=113.8 K 2024-12-16T18:57:12,584 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] compactions.Compactor(224): Compacting d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1734375417543 2024-12-16T18:57:12,584 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-f262ce3eadeb458d93b424eb7d375bd8, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1734375431774 2024-12-16T18:57:12,585 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-4011e518faa141118ae139ee5773ec90, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734375431804 2024-12-16T18:57:12,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741854_1030 (size=70862) 2024-12-16T18:57:12,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741854_1030 (size=70862) 2024-12-16T18:57:12,587 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. 2024-12-16T18:57:12,587 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. 
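
The 113.8 K input to this compaction is just the sum of the three descriptors above:

    73.6 K (d1496...-top) + 23.8 K (f262...) + 16.4 K (4011...) = 113.8 K

The first input is the parent's 73.6 K file read through a "-top" half-reference, so only rows at or above the split key row0062 survive into the daughter's output (the 39.9 K file committed below); rewriting referenced data into files the daughter owns outright is what eventually allows the parent region's files to be cleaned up.
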
2024-12-16T18:57:12,588 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=73b443c21ee806be6a66ecec93f40711, regionState=OPEN, openSeqNum=122, regionLocation=7a6e4bf70377,42969,1734375405882
2024-12-16T18:57:12,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16
2024-12-16T18:57:12,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; OpenRegionProcedure 73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882 in 224 msec
2024-12-16T18:57:12,593 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=12
2024-12-16T18:57:12,593 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73b443c21ee806be6a66ecec93f40711, ASSIGN in 380 msec
2024-12-16T18:57:12,595 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=fab14af368cb42e95d16937b9023fbb2, daughterA=6c92de9f8fe3ea13408be58d6f2c0c24, daughterB=73b443c21ee806be6a66ecec93f40711 in 740 msec
2024-12-16T18:57:12,608 INFO [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73b443c21ee806be6a66ecec93f40711#info#compaction#53 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:57:12,608 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/4656e5b7b396435d936c22c9cd9317e5 is 1080, key is row0062/info:/1734375429768/Put/seqid=0
2024-12-16T18:57:12,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741855_1031 (size=40830)
2024-12-16T18:57:12,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741855_1031 (size=40830)
2024-12-16T18:57:12,619 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/4656e5b7b396435d936c22c9cd9317e5 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/4656e5b7b396435d936c22c9cd9317e5
2024-12-16T18:57:12,626 INFO [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73b443c21ee806be6a66ecec93f40711/info of 73b443c21ee806be6a66ecec93f40711 into 4656e5b7b396435d936c22c9cd9317e5(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:57:12,626 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:12,626 INFO [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., storeName=73b443c21ee806be6a66ecec93f40711/info, priority=13, startTime=1734375432582; duration=0sec
2024-12-16T18:57:12,626 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:12,626 DEBUG [RS:0;7a6e4bf70377:42969-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73b443c21ee806be6a66ecec93f40711:info
2024-12-16T18:57:12,993 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/.tmp/info/e279d5958ac049a0afe07f1be507e5a5 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/info/e279d5958ac049a0afe07f1be507e5a5
2024-12-16T18:57:13,000 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 1 (all) file(s) in 6c92de9f8fe3ea13408be58d6f2c0c24/info of 6c92de9f8fe3ea13408be58d6f2c0c24 into e279d5958ac049a0afe07f1be507e5a5(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:57:13,000 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c92de9f8fe3ea13408be58d6f2c0c24:
2024-12-16T18:57:13,000 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24., storeName=6c92de9f8fe3ea13408be58d6f2c0c24/info, priority=15, startTime=1734375432551; duration=0sec
2024-12-16T18:57:13,000 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:13,000 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c92de9f8fe3ea13408be58d6f2c0c24:info
2024-12-16T18:57:13,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:13,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:13,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:15,721 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T18:57:17,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:57:17,338 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:57:17,339 INFO [RS-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53912, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:57:17,616 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:20,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:21,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:21,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:21,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:21,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35578 deadline: 1734375451921, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1734375407530.fab14af368cb42e95d16937b9023fbb2. 
is not online on 7a6e4bf70377,42969,1734375405882 2024-12-16T18:57:22,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:22,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:22,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:22,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-16T18:57:22,531 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-16T18:57:23,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:23,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:23,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:24,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:24,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:24,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:25,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:25,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 after 196149ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T18:57:25,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:25,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:26,017 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region f41842edafcf2308940ff2b22b3c218f, had cached 0 bytes from a total of 23930 2024-12-16T18:57:26,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:26,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:26,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:27,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:27,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:27,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:27,558 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-16T18:57:27,558 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-16T18:57:28,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-16T18:57:28,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:28,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:29,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:29,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:29,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:30,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:30,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:30,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:31,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:31,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:31,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:31,850 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835
2024-12-16T18:57:32,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:32,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:32,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:33,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:33,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:33,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:34,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:34,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:34,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:35,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:35,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:35,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:36,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:36,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:36,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:37,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:37,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:37,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:38,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-12-16T18:57:38,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:38,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:39,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:39,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:39,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:40,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:40,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:40,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:41,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:41,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:41,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:42,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:42,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:42,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:43,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:43,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:43,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-16T18:57:44,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:57:44,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:57:44,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/642e6e8c93e84c13ac3deb92cc05488f is 1080, key is row0095/info:/1734375462042/Put/seqid=0
2024-12-16T18:57:44,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741856_1032 (size=12513)
2024-12-16T18:57:44,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741856_1032 (size=12513)
2024-12-16T18:57:44,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/642e6e8c93e84c13ac3deb92cc05488f
2024-12-16T18:57:44,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/642e6e8c93e84c13ac3deb92cc05488f as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/642e6e8c93e84c13ac3deb92cc05488f
2024-12-16T18:57:44,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/642e6e8c93e84c13ac3deb92cc05488f, entries=7, sequenceid=132, filesize=12.2 K
2024-12-16T18:57:44,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 73b443c21ee806be6a66ecec93f40711 in 27ms, sequenceid=132, compaction requested=false
2024-12-16T18:57:44,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:44,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:57:44,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB
2024-12-16T18:57:44,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/1c39b753bd3a40aa9940e6a49dd447b4 is 1080, key is row0102/info:/1734375464054/Put/seqid=0
2024-12-16T18:57:44,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741857_1033 (size=25472)
2024-12-16T18:57:44,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741857_1033 (size=25472)
2024-12-16T18:57:44,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/1c39b753bd3a40aa9940e6a49dd447b4
2024-12-16T18:57:44,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/1c39b753bd3a40aa9940e6a49dd447b4 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/1c39b753bd3a40aa9940e6a49dd447b4
2024-12-16T18:57:44,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/1c39b753bd3a40aa9940e6a49dd447b4, entries=19, sequenceid=154, filesize=24.9 K
2024-12-16T18:57:44,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=8.41 KB/8608 for 73b443c21ee806be6a66ecec93f40711 in 21ms, sequenceid=154, compaction requested=true
2024-12-16T18:57:44,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:44,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73b443c21ee806be6a66ecec93f40711:info, priority=-2147483648, current under compaction store size is 1
2024-12-16T18:57:44,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:44,102 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:57:44,103 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78815 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:57:44,103 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): 73b443c21ee806be6a66ecec93f40711/info is initiating minor compaction (all files)
2024-12-16T18:57:44,103 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73b443c21ee806be6a66ecec93f40711/info in TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.
2024-12-16T18:57:44,103 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/4656e5b7b396435d936c22c9cd9317e5, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/642e6e8c93e84c13ac3deb92cc05488f, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/1c39b753bd3a40aa9940e6a49dd447b4] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp, totalSize=77.0 K
2024-12-16T18:57:44,104 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4656e5b7b396435d936c22c9cd9317e5, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734375429768
2024-12-16T18:57:44,104 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 642e6e8c93e84c13ac3deb92cc05488f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734375462042
2024-12-16T18:57:44,104 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c39b753bd3a40aa9940e6a49dd447b4, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734375464054
2024-12-16T18:57:44,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
    [identical InvocationTargetException / "Filesystem closed" stack trace as at 18:57:36,375 above]
2024-12-16T18:57:44,116 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73b443c21ee806be6a66ecec93f40711#info#compaction#56 average throughput is 30.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:57:44,116 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/c4c30efe4ddc4dba8bf5517635ff9990 is 1080, key is row0062/info:/1734375429768/Put/seqid=0
2024-12-16T18:57:44,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741858_1034 (size=69025)
2024-12-16T18:57:44,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741858_1034 (size=69025)
2024-12-16T18:57:44,127 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/c4c30efe4ddc4dba8bf5517635ff9990 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c4c30efe4ddc4dba8bf5517635ff9990
2024-12-16T18:57:44,134 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73b443c21ee806be6a66ecec93f40711/info of 73b443c21ee806be6a66ecec93f40711 into c4c30efe4ddc4dba8bf5517635ff9990(size=67.4 K), total size for store is 67.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:57:44,134 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:44,134 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., storeName=73b443c21ee806be6a66ecec93f40711/info, priority=13, startTime=1734375464102; duration=0sec
2024-12-16T18:57:44,135 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:44,135 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73b443c21ee806be6a66ecec93f40711:info
2024-12-16T18:57:44,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:57:44,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta after 196148ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T18:57:44,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:45,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:45,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:45,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
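The two stack shapes above are the whole story of this recovery loop: RecoverLeaseFSUtils reflectively invokes DistributedFileSystem.recoverLease and then polls DistributedFileSystem.isFileClosed, and both paths die in DFSClient.checkOpen because the old filesystem instance has already been shut down, so every attempt logs "Filesystem closed" and retries. A minimal sketch of that loop under those assumptions, calling the HDFS API directly rather than via reflection and using a fixed one-second poll instead of HBase's real pause schedule:

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Sketch only; the real RecoverLeaseFSUtils reaches these methods via
 * reflection (hence the InvocationTargetException wrapper above) and
 * uses its own escalating pause schedule.
 */
final class WalLeaseRecoverySketch {
  static void recoverWalLease(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    // Ask the NameNode to reassign the dead writer's lease on the WAL.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered) {
      // Both recoverLease and isFileClosed route through
      // DFSClient.checkOpen(), so a closed client throws
      // "Filesystem closed" on every attempt, as in the WARNs above.
      if (dfs.isFileClosed(wal)) {
        return; // the NameNode finished closing the file for us
      }
      Thread.sleep(1000L); // assumed poll interval for this sketch
      recovered = dfs.recoverLease(wal);
    }
  }
}
```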
2024-12-16T18:57:45,721 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T18:57:46,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:57:46,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-16T18:57:46,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/332d4291106d486cbd40082519e6e47b is 1080, key is row0121/info:/1734375464081/Put/seqid=0
2024-12-16T18:57:46,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741859_1035 (size=14672)
2024-12-16T18:57:46,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741859_1035 (size=14672)
2024-12-16T18:57:46,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/332d4291106d486cbd40082519e6e47b
2024-12-16T18:57:46,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/332d4291106d486cbd40082519e6e47b as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/332d4291106d486cbd40082519e6e47b
2024-12-16T18:57:46,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/332d4291106d486cbd40082519e6e47b, entries=9, sequenceid=167, filesize=14.3 K
2024-12-16T18:57:46,117 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=17.86 KB/18292 for 73b443c21ee806be6a66ecec93f40711 in 24ms, sequenceid=167, compaction requested=false
2024-12-16T18:57:46,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:46,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:57:46,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-16T18:57:46,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/8ffb7de35e7740168b6ba8de4d0ca999 is 1080, key is row0130/info:/1734375466094/Put/seqid=0
2024-12-16T18:57:46,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741860_1036 (size=24394)
2024-12-16T18:57:46,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741860_1036 (size=24394)
2024-12-16T18:57:46,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T18:57:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35578 deadline: 1734375476137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882
2024-12-16T18:57:46,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
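The RegionTooBusyException above is memstore back-pressure: HRegion.checkResources rejects the mutation because the region's unflushed data is over its blocking limit (32.0 K under this test's tiny flush configuration), and writes succeed again only once MemStoreFlusher drains the memstore, as the surrounding flush records show. A hypothetical caller-side sketch of handling it with exponential backoff; the stock HBase client already retries this exception internally, and the method and constants below are illustrative, not from the test:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

final class PutBackoffSketch {
  // Retry a Put that may be rejected while the memstore is over its
  // blocking limit; sleeping gives MemStoreFlusher time to flush.
  static void putWithBackoff(Table table, Put put)
      throws IOException, InterruptedException {
    long backoffMs = 100L; // assumed starting backoff
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        Thread.sleep(backoffMs);
        backoffMs *= 2;
      }
    }
    throw new IOException("region still too busy after 5 attempts");
  }
}
```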
2024-12-16T18:57:46,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:46,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/8ffb7de35e7740168b6ba8de4d0ca999
2024-12-16T18:57:46,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/8ffb7de35e7740168b6ba8de4d0ca999 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8ffb7de35e7740168b6ba8de4d0ca999
2024-12-16T18:57:46,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8ffb7de35e7740168b6ba8de4d0ca999, entries=18, sequenceid=188, filesize=23.8 K
2024-12-16T18:57:46,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 73b443c21ee806be6a66ecec93f40711 in 434ms, sequenceid=188, compaction requested=true
2024-12-16T18:57:46,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:46,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73b443c21ee806be6a66ecec93f40711:info, priority=-2147483648, current under compaction store size is 1
2024-12-16T18:57:46,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:46,552 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:57:46,553 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 108091 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:57:46,554 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): 73b443c21ee806be6a66ecec93f40711/info is initiating minor compaction (all files)
2024-12-16T18:57:46,554 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73b443c21ee806be6a66ecec93f40711/info in TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.
2024-12-16T18:57:46,554 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c4c30efe4ddc4dba8bf5517635ff9990, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/332d4291106d486cbd40082519e6e47b, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8ffb7de35e7740168b6ba8de4d0ca999] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp, totalSize=105.6 K
2024-12-16T18:57:46,554 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4c30efe4ddc4dba8bf5517635ff9990, keycount=59, bloomtype=ROW, size=67.4 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734375429768
2024-12-16T18:57:46,555 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 332d4291106d486cbd40082519e6e47b, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1734375464081
2024-12-16T18:57:46,555 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ffb7de35e7740168b6ba8de4d0ca999, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1734375466094
2024-12-16T18:57:46,566 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73b443c21ee806be6a66ecec93f40711#info#compaction#59 average throughput is 44.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:57:46,567 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/2a1ef71e441f447d816625bda9c0a0d2 is 1080, key is row0062/info:/1734375429768/Put/seqid=0
2024-12-16T18:57:46,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741861_1037 (size=98310)
2024-12-16T18:57:46,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741861_1037 (size=98310)
2024-12-16T18:57:46,576 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/2a1ef71e441f447d816625bda9c0a0d2 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/2a1ef71e441f447d816625bda9c0a0d2
2024-12-16T18:57:46,582 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73b443c21ee806be6a66ecec93f40711/info of 73b443c21ee806be6a66ecec93f40711 into 2a1ef71e441f447d816625bda9c0a0d2(size=96.0 K), total size for store is 96.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:57:46,582 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:46,582 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., storeName=73b443c21ee806be6a66ecec93f40711/info, priority=13, startTime=1734375466552; duration=0sec
2024-12-16T18:57:46,582 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:46,582 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73b443c21ee806be6a66ecec93f40711:info
2024-12-16T18:57:47,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
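Both compactions in this stretch report their average rate against the 50.00 MB/second cap from throttle.PressureAwareThroughputController and finish without sleeping ("slept 0 time(s)"). A rough sketch of that style of throttling, under stated assumptions: a helper invoked after each chunk of compaction output, whereas the real controller works on configurable control-size windows and can also raise or lower the limit with flush pressure; the names below are illustrative, not HBase's controller API:

```java
final class ThroughputThrottleSketch {
  // If the bytes written so far imply a rate above the limit, sleep just
  // long enough to fall back under it; each such sleep is what the log
  // line counts as "slept N time(s)".
  static void throttle(long bytesWritten, long startNanos, double limitBytesPerSec)
      throws InterruptedException {
    double elapsedSec = (System.nanoTime() - startNanos) / 1_000_000_000.0;
    double earliestAllowedSec = bytesWritten / limitBytesPerSec;
    long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000.0);
    if (sleepMs > 0) {
      Thread.sleep(sleepMs);
    }
  }
}
```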
2024-12-16T18:57:47,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:47,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:47,596 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 58ec97c1298be15af22bf03642bc8ca9 changed from -1.0 to 0.0, refreshing cache
2024-12-16T18:57:48,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:48,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:48,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:49,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:49,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:49,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:50,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:50,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:50,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:51,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:51,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:51,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:52,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:52,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:52,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:53,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:53,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:53,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:57:54,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:54,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:54,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:55,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:55,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:55,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:56,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
2024-12-16T18:57:56,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:57:56,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-12-16T18:57:56,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/0d2e9724b31e41baa370274dfadc0ef3 is 1080, key is row0148/info:/1734375466118/Put/seqid=0
2024-12-16T18:57:56,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741862_1038 (size=17906)
2024-12-16T18:57:56,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741862_1038 (size=17906)
2024-12-16T18:57:56,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/0d2e9724b31e41baa370274dfadc0ef3
2024-12-16T18:57:56,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/0d2e9724b31e41baa370274dfadc0ef3 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/0d2e9724b31e41baa370274dfadc0ef3
2024-12-16T18:57:56,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/0d2e9724b31e41baa370274dfadc0ef3, entries=12, sequenceid=204, filesize=17.5 K
2024-12-16T18:57:56,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 73b443c21ee806be6a66ecec93f40711 in 30ms, sequenceid=204, compaction requested=false
2024-12-16T18:57:56,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:56,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:56,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:57,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:57,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:57,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:57:57,524 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 6c92de9f8fe3ea13408be58d6f2c0c24, had cached 0 bytes from a total of 70862
2024-12-16T18:57:57,554 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73b443c21ee806be6a66ecec93f40711, had cached 0 bytes from a total of 116216
2024-12-16T18:57:58,117 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:57:58,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:57:58,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:57:58,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/b7dcae6c915e4affaf1784404db6eea4 is 1080, key is row0160/info:/1734375476221/Put/seqid=0
2024-12-16T18:57:58,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741863_1039 (size=12516)
2024-12-16T18:57:58,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741863_1039 (size=12516)
2024-12-16T18:57:58,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T18:57:58,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35578 deadline: 1734375488284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882
2024-12-16T18:57:58,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:57:58,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:57:58,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/b7dcae6c915e4affaf1784404db6eea4 2024-12-16T18:57:58,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/b7dcae6c915e4affaf1784404db6eea4 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/b7dcae6c915e4affaf1784404db6eea4 2024-12-16T18:57:58,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/b7dcae6c915e4affaf1784404db6eea4, entries=7, sequenceid=214, filesize=12.2 K 2024-12-16T18:57:58,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 73b443c21ee806be6a66ecec93f40711 in 448ms, sequenceid=214, compaction requested=true 2024-12-16T18:57:58,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711: 2024-12-16T18:57:58,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73b443c21ee806be6a66ecec93f40711:info, priority=-2147483648, current under compaction store size is 1 2024-12-16T18:57:58,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-16T18:57:58,681 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-16T18:57:58,682 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128732 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-16T18:57:58,682 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): 73b443c21ee806be6a66ecec93f40711/info is initiating minor compaction (all files) 2024-12-16T18:57:58,682 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73b443c21ee806be6a66ecec93f40711/info in TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. 
2024-12-16T18:57:58,683 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/2a1ef71e441f447d816625bda9c0a0d2, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/0d2e9724b31e41baa370274dfadc0ef3, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/b7dcae6c915e4affaf1784404db6eea4] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp, totalSize=125.7 K
2024-12-16T18:57:58,683 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a1ef71e441f447d816625bda9c0a0d2, keycount=86, bloomtype=ROW, size=96.0 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1734375429768
2024-12-16T18:57:58,683 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d2e9724b31e41baa370274dfadc0ef3, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734375466118
2024-12-16T18:57:58,683 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7dcae6c915e4affaf1784404db6eea4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1734375476221
2024-12-16T18:57:58,695 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73b443c21ee806be6a66ecec93f40711#info#compaction#62 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:57:58,695 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/f6e4c22fbe0b4e1fa39f7bca5dc22991 is 1080, key is row0062/info:/1734375429768/Put/seqid=0
2024-12-16T18:57:58,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741864_1040 (size=118898)
2024-12-16T18:57:58,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741864_1040 (size=118898)
2024-12-16T18:57:58,707 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/f6e4c22fbe0b4e1fa39f7bca5dc22991 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/f6e4c22fbe0b4e1fa39f7bca5dc22991
2024-12-16T18:57:58,713 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73b443c21ee806be6a66ecec93f40711/info of 73b443c21ee806be6a66ecec93f40711 into f6e4c22fbe0b4e1fa39f7bca5dc22991(size=116.1 K), total size for store is 116.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:57:58,713 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:57:58,713 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., storeName=73b443c21ee806be6a66ecec93f40711/info, priority=13, startTime=1734375478681; duration=0sec
2024-12-16T18:57:58,713 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:57:58,713 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73b443c21ee806be6a66ecec93f40711:info
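On the selection logged at 18:57:58,682 ("1 permutations with 1 in ratio"): exploring-style compaction policies slide a window over the eligible store files and count a window as "in ratio" when no single file is outsized relative to the rest of the window. A much-simplified, hypothetical sketch of such a check follows; it is not HBase's actual ExploringCompactionPolicy, the ratio values are illustrative only, and the file sizes are the three HFiles compacted above.

public class RatioCheckDemo {

    // A window is "in ratio" when no single file exceeds the combined
    // size of the other files in the window times the configured ratio.
    static boolean inRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) total += s;
        for (long s : sizes) {
            if (s > (total - s) * ratio) return false; // one file dwarfs the rest
        }
        return true;
    }

    public static void main(String[] args) {
        // ~96.0 K, ~17.5 K, ~12.2 K; sums to the 128732 bytes reported above.
        long[] window = {98304, 17920, 12508};
        System.out.println(inRatio(window, 1.2)); // false: the 96 K file dwarfs the rest
        System.out.println(inRatio(window, 5.0)); // true: a permissive ratio admits the window
    }
}

With a strict ratio the 96 K file would fail against its ~30 K of companions, so a window like this one passes only under a more permissive effective ratio or additional policy rules (for instance, a minor compaction taking all files, as the "all files" note above suggests).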
2024-12-16T18:57:59,117 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:57:59,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
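The one-second spacing of the retries summarized below (each of the three paths is probed again roughly every second) reflects a poll-and-sleep loop around the lease-recovery check. A minimal sketch of such a loop follows, assuming a fixed one-second pause and a bounded attempt count; this is not the real RecoverLeaseFSUtils, which goes through reflection and its own pause/timeout configuration.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryLoop {

    // Poll until the old WAL file is reported closed, or give up.
    static boolean waitForClose(DistributedFileSystem fs, Path wal, int maxAttempts)
            throws InterruptedException {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                if (fs.isFileClosed(wal)) {
                    return true;        // lease recovered, file is closed
                }
            } catch (IOException e) {
                // "Filesystem closed" lands here once the DFSClient is shut down;
                // the real utility logs a WARN like the ones above and keeps trying.
            }
            Thread.sleep(1000L);        // pause before the next probe
        }
        return false;                   // give up after maxAttempts probes
    }
}

In this run the loop can never succeed: the test's DFS client is already closed, so every probe throws instead of returning, hence the steady stream of WARNs.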
[The WARN above repeats with an identical InvocationTargetException / "Filesystem closed" stack trace, roughly once per second per file, as Close-WAL-Writer-0 keeps probing the three WALs whose leases are still being recovered. Collapsed occurrences, by target file:
  7a6e4bf70377%2C45905%2C1734375218213.1734375219221 — 18:57:59,469; 18:58:00,469; 18:58:01,470; 18:58:02,471; 18:58:03,471; 18:58:04,472; 18:58:05,473; 18:58:06,474; 18:58:07,474
  7a6e4bf70377%2C36083%2C1734375217984.1734375218574 (MasterData) — 18:58:00,118; 18:58:01,118; 18:58:02,119; 18:58:03,119; 18:58:04,120; 18:58:05,121; 18:58:06,122; 18:58:07,123
  7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta — 18:58:00,390; 18:58:01,391; 18:58:02,391; 18:58:03,392; 18:58:04,393; 18:58:05,393; 18:58:06,394; 18:58:07,395
Only the timestamp and target path vary between occurrences.]
2024-12-16T18:58:08,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
2024-12-16T18:58:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:58:08,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-16T18:58:08,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/758ff5eeb9fa4ab5982f7ed9edd772a0 is 1080, key is row0167/info:/1734375478235/Put/seqid=0
2024-12-16T18:58:08,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741865_1041 (size=29784)
2024-12-16T18:58:08,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741865_1041 (size=29784)
2024-12-16T18:58:08,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/758ff5eeb9fa4ab5982f7ed9edd772a0
2024-12-16T18:58:08,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/758ff5eeb9fa4ab5982f7ed9edd772a0 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/758ff5eeb9fa4ab5982f7ed9edd772a0
2024-12-16T18:58:08,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/758ff5eeb9fa4ab5982f7ed9edd772a0, entries=23, sequenceid=241, filesize=29.1 K
2024-12-16T18:58:08,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 73b443c21ee806be6a66ecec93f40711 in 30ms, sequenceid=241, compaction requested=false
2024-12-16T18:58:08,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:08,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:08,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:09,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:09,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:09,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:10,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:10,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:58:10,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-16T18:58:10,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/fd546bcff1314164942b2c3d3162d135 is 1080, key is row0190/info:/1734375488333/Put/seqid=0
2024-12-16T18:58:10,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741866_1042 (size=12516)
2024-12-16T18:58:10,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741866_1042 (size=12516)
2024-12-16T18:58:10,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/fd546bcff1314164942b2c3d3162d135
2024-12-16T18:58:10,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/fd546bcff1314164942b2c3d3162d135 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/fd546bcff1314164942b2c3d3162d135
2024-12-16T18:58:10,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/fd546bcff1314164942b2c3d3162d135, entries=7, sequenceid=251, filesize=12.2 K
2024-12-16T18:58:10,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 73b443c21ee806be6a66ecec93f40711 in 32ms, sequenceid=251, compaction requested=true
2024-12-16T18:58:10,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:10,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73b443c21ee806be6a66ecec93f40711:info, priority=-2147483648, current under compaction store size is 1
2024-12-16T18:58:10,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:58:10,373 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:58:10,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:58:10,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-12-16T18:58:10,375 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 161198 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:58:10,375 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): 73b443c21ee806be6a66ecec93f40711/info is initiating minor compaction (all files)
2024-12-16T18:58:10,375 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73b443c21ee806be6a66ecec93f40711/info in TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.
2024-12-16T18:58:10,375 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/f6e4c22fbe0b4e1fa39f7bca5dc22991, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/758ff5eeb9fa4ab5982f7ed9edd772a0, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/fd546bcff1314164942b2c3d3162d135] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp, totalSize=157.4 K
2024-12-16T18:58:10,376 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6e4c22fbe0b4e1fa39f7bca5dc22991, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1734375429768
2024-12-16T18:58:10,377 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 758ff5eeb9fa4ab5982f7ed9edd772a0, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1734375478235
2024-12-16T18:58:10,377 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd546bcff1314164942b2c3d3162d135, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734375488333
2024-12-16T18:58:10,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/24190ee559f04470a0be0e814b93e8af is 1080, key is row0197/info:/1734375490342/Put/seqid=0
2024-12-16T18:58:10,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741867_1043 (size=22251)
2024-12-16T18:58:10,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741867_1043 (size=22251)
2024-12-16T18:58:10,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/24190ee559f04470a0be0e814b93e8af
2024-12-16T18:58:10,394 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73b443c21ee806be6a66ecec93f40711#info#compaction#66 average throughput is 34.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:58:10,394 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/8f1791fe96004d9886706a86c03ada70 is 1080, key is row0062/info:/1734375429768/Put/seqid=0
2024-12-16T18:58:10,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:10,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/24190ee559f04470a0be0e814b93e8af as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/24190ee559f04470a0be0e814b93e8af
2024-12-16T18:58:10,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/24190ee559f04470a0be0e814b93e8af, entries=16, sequenceid=270, filesize=21.7 K
2024-12-16T18:58:10,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=12.61 KB/12912 for 73b443c21ee806be6a66ecec93f40711 in 34ms, sequenceid=270, compaction requested=false
2024-12-16T18:58:10,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:10,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741868_1044 (size=151545)
2024-12-16T18:58:10,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741868_1044 (size=151545)
2024-12-16T18:58:10,424 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/8f1791fe96004d9886706a86c03ada70 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8f1791fe96004d9886706a86c03ada70
2024-12-16T18:58:10,429 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73b443c21ee806be6a66ecec93f40711/info of 73b443c21ee806be6a66ecec93f40711 into 8f1791fe96004d9886706a86c03ada70(size=148.0 K), total size for store is 169.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:58:10,429 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:10,429 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., storeName=73b443c21ee806be6a66ecec93f40711/info, priority=13, startTime=1734375490373; duration=0sec
2024-12-16T18:58:10,429 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:58:10,429 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73b443c21ee806be6a66ecec93f40711:info
2024-12-16T18:58:10,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:11,017 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region f41842edafcf2308940ff2b22b3c218f, had cached 0 bytes from a total of 23930
2024-12-16T18:58:11,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:11,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:11,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:12,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:12,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:58:12,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-12-16T18:58:12,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-16T18:58:12,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/34825c18fc634f93b8d4f281d1a5ff1d is 1080, key is row0213/info:/1734375490375/Put/seqid=0
2024-12-16T18:58:12,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T18:58:12,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35578 deadline: 1734375502422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882
2024-12-16T18:58:12,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741869_1045 (size=19013)
2024-12-16T18:58:12,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741869_1045 (size=19013)
2024-12-16T18:58:12,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/34825c18fc634f93b8d4f281d1a5ff1d
2024-12-16T18:58:12,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/34825c18fc634f93b8d4f281d1a5ff1d as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/34825c18fc634f93b8d4f281d1a5ff1d
2024-12-16T18:58:12,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/34825c18fc634f93b8d4f281d1a5ff1d, entries=13, sequenceid=287, filesize=18.6 K
2024-12-16T18:58:12,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 73b443c21ee806be6a66ecec93f40711 in 37ms, sequenceid=287, compaction requested=true
2024-12-16T18:58:12,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:12,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73b443c21ee806be6a66ecec93f40711:info, priority=-2147483648, current under compaction store size is 1
2024-12-16T18:58:12,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:58:12,437 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:58:12,438 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192809 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:58:12,438 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): 73b443c21ee806be6a66ecec93f40711/info is initiating minor compaction (all files)
2024-12-16T18:58:12,438 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73b443c21ee806be6a66ecec93f40711/info in TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.
2024-12-16T18:58:12,438 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8f1791fe96004d9886706a86c03ada70, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/24190ee559f04470a0be0e814b93e8af, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/34825c18fc634f93b8d4f281d1a5ff1d] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp, totalSize=188.3 K
2024-12-16T18:58:12,439 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f1791fe96004d9886706a86c03ada70, keycount=135, bloomtype=ROW, size=148.0 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734375429768
2024-12-16T18:58:12,439 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24190ee559f04470a0be0e814b93e8af, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734375490342
2024-12-16T18:58:12,440 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34825c18fc634f93b8d4f281d1a5ff1d, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1734375490375
2024-12-16T18:58:12,450 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73b443c21ee806be6a66ecec93f40711#info#compaction#68 average throughput is 84.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:58:12,451 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/11aacd3108d74cf1be56837f69fb6402 is 1080, key is row0062/info:/1734375429768/Put/seqid=0
2024-12-16T18:58:12,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741870_1046 (size=182943)
2024-12-16T18:58:12,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741870_1046 (size=182943)
2024-12-16T18:58:12,459 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/11aacd3108d74cf1be56837f69fb6402 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/11aacd3108d74cf1be56837f69fb6402
2024-12-16T18:58:12,464 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73b443c21ee806be6a66ecec93f40711/info of 73b443c21ee806be6a66ecec93f40711 into 11aacd3108d74cf1be56837f69fb6402(size=178.7 K), total size for store is 178.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:58:12,464 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:12,464 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., storeName=73b443c21ee806be6a66ecec93f40711/info, priority=13, startTime=1734375492437; duration=0sec
2024-12-16T18:58:12,464 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:58:12,464 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73b443c21ee806be6a66ecec93f40711:info
2024-12-16T18:58:12,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:13,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:58:13,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:13,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-16T18:58:15,721 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T18:58:16,851 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835
2024-12-16T18:58:22,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:22,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-16T18:58:22,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:58:22,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB
2024-12-16T18:58:22,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/c2e416b6852e4878ac39b75a41749cc1 is 1080, key is row0226/info:/1734375492400/Put/seqid=0
2024-12-16T18:58:22,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741871_1047 (size=23333)
2024-12-16T18:58:22,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741871_1047 (size=23333)
2024-12-16T18:58:22,450 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/c2e416b6852e4878ac39b75a41749cc1
2024-12-16T18:58:22,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/c2e416b6852e4878ac39b75a41749cc1 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c2e416b6852e4878ac39b75a41749cc1
2024-12-16T18:58:22,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-16T18:58:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35578 deadline: 1734375512463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73b443c21ee806be6a66ecec93f40711, server=7a6e4bf70377,42969,1734375405882
2024-12-16T18:58:22,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c2e416b6852e4878ac39b75a41749cc1, entries=17, sequenceid=308, filesize=22.8 K
2024-12-16T18:58:22,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for 73b443c21ee806be6a66ecec93f40711 in 33ms, sequenceid=308, compaction requested=false
2024-12-16T18:58:22,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:22,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:23,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:23,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:23,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:24,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:24,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:24,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:25,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:25,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:25,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:26,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:26,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:26,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:27,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:27,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:27,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:28,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:28,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:28,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:29,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:29,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:29,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:30,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:30,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:30,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:31,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:31,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:31,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:58:32,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:32,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-16T18:58:32,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
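The retry storm above is mechanical: every trace shows RecoverLeaseFSUtils.isFileClosed (RecoverLeaseFSUtils.java:254) reaching DistributedFileSystem.isFileClosed through Method.invoke, and the probe keeps failing because the DFS client behind hdfs://localhost:35231 has already been shut down, so DFSClient.checkOpen throws IOException("Filesystem closed"), which reflection wraps in InvocationTargetException. A minimal sketch of that reflective-probe pattern (class and variable names here are illustrative, not HBase's exact code):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only -- not HBase's exact code.
final class IsFileClosedProbe {

  // isFileClosed(Path) is declared on DistributedFileSystem, not on the generic
  // FileSystem contract, so a caller holding a plain FileSystem reference must
  // look the method up reflectively; that lookup is what puts Method.invoke and
  // InvocationTargetException into the traces above.
  static Boolean probe(FileSystem fs, Path wal) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return null; // filesystem offers no such probe; caller falls back to plain retries
    } catch (ReflectiveOperationException e) {
      // An IOException("Filesystem closed") thrown by the underlying DFSClient
      // arrives here wrapped in an InvocationTargetException -- exactly what
      // RecoverLeaseFSUtils logs as "Failed invocation" before retrying.
      return null;
    }
  }
}

Because the probe fails without ever answering, the Close-WAL-Writer thread just logs the WARN and tries again, which is why each of the three WAL files reappears above at roughly one-second intervals.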
2024-12-16T18:58:32,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] regionserver.HRegion(8581): Flush requested on 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:58:32,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-12-16T18:58:32,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/78e3545f38c24afca2d0ebec4e9b2218 is 1080, key is row0243/info:/1734375502437/Put/seqid=0
2024-12-16T18:58:32,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741872_1048 (size=19013)
2024-12-16T18:58:32,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741872_1048 (size=19013)
2024-12-16T18:58:32,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/78e3545f38c24afca2d0ebec4e9b2218
2024-12-16T18:58:32,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/78e3545f38c24afca2d0ebec4e9b2218 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/78e3545f38c24afca2d0ebec4e9b2218
2024-12-16T18:58:32,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/78e3545f38c24afca2d0ebec4e9b2218, entries=13, sequenceid=324, filesize=18.6 K
2024-12-16T18:58:32,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for 73b443c21ee806be6a66ecec93f40711 in 464ms, sequenceid=324, compaction requested=true
2024-12-16T18:58:32,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:32,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73b443c21ee806be6a66ecec93f40711:info, priority=-2147483648, current under compaction store size is 1
2024-12-16T18:58:32,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:58:32,986 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-16T18:58:32,987 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 225289 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-16T18:58:32,987 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1540): 73b443c21ee806be6a66ecec93f40711/info is initiating minor compaction (all files)
2024-12-16T18:58:32,987 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73b443c21ee806be6a66ecec93f40711/info in TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.
2024-12-16T18:58:32,988 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/11aacd3108d74cf1be56837f69fb6402, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c2e416b6852e4878ac39b75a41749cc1, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/78e3545f38c24afca2d0ebec4e9b2218] into tmpdir=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp, totalSize=220.0 K
2024-12-16T18:58:32,988 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11aacd3108d74cf1be56837f69fb6402, keycount=164, bloomtype=ROW, size=178.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1734375429768
2024-12-16T18:58:32,988 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2e416b6852e4878ac39b75a41749cc1, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1734375492400
2024-12-16T18:58:32,989 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78e3545f38c24afca2d0ebec4e9b2218, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1734375502437
2024-12-16T18:58:33,019 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73b443c21ee806be6a66ecec93f40711#info#compaction#71 average throughput is 49.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-16T18:58:33,020 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/ecc875e0fabc4b358a17799293e76bf4 is 1080, key is row0062/info:/1734375429768/Put/seqid=0
2024-12-16T18:58:33,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741873_1049 (size=215492)
2024-12-16T18:58:33,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741873_1049 (size=215492)
2024-12-16T18:58:33,062 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/ecc875e0fabc4b358a17799293e76bf4 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/ecc875e0fabc4b358a17799293e76bf4
2024-12-16T18:58:33,073 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73b443c21ee806be6a66ecec93f40711/info of 73b443c21ee806be6a66ecec93f40711 into ecc875e0fabc4b358a17799293e76bf4(size=210.4 K), total size for store is 210.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-16T18:58:33,073 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:33,073 INFO [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., storeName=73b443c21ee806be6a66ecec93f40711/info, priority=13, startTime=1734375512986; duration=0sec
2024-12-16T18:58:33,073 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-16T18:58:33,073 DEBUG [RS:0;7a6e4bf70377:42969-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73b443c21ee806be6a66ecec93f40711:info
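As a sanity check on the selection above: the three inputs (178.7 K + 22.8 K + 18.6 K) sum to the logged totalSize=220.0 K (225289 bytes), and the rewrite produced a single 210.4 K file. A small self-contained check of that arithmetic; reading the ~4% shrinkage as superseded cell versions being dropped during the rewrite is an inference, not something the log states:

// Quick arithmetic check on the minor compaction logged above.
final class CompactionMath {
  public static void main(String[] args) {
    double[] inputKb = {178.7, 22.8, 18.6};   // 11aacd31..., c2e416b6..., 78e3545f...
    double totalKb = 0;
    for (double kb : inputKb) totalKb += kb;
    // ~220.1 K, matching the logged totalSize=220.0 K (225289 bytes / 1024)
    System.out.printf("inputs    = %.1f K%n", totalKb);
    double outputKb = 210.4;                  // ecc875e0... written by the compactor
    // ~4.4% of the input bytes did not survive the rewrite
    System.out.printf("reclaimed = %.1f%%%n", 100 * (totalKb - outputKb) / totalKb);
  }
}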
2024-12-16T18:58:33,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:58:33,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:58:33,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:58:34,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
2024-12-16T18:58:34,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
2024-12-16T18:58:34,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
2024-12-16T18:58:34,526 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-12-16T18:58:34,526 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42969%2C1734375405882.1734375514526
2024-12-16T18:58:34,536 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375406499 with entries=311, filesize=307.59 KB; new WAL /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375514526
2024-12-16T18:58:34,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41819:41819),(127.0.0.1/127.0.0.1:38895:38895)]
2024-12-16T18:58:34,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375406499 is not closed yet, will try archiving it next time
2024-12-16T18:58:34,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741833_1009 (size=314980)
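One detail worth noting in the roll above: WAL file names end in the creation time in epoch milliseconds, so the new file suffix 1734375514526 decodes to the very instant of the 18:58:34,526 log line that announced it. A toy decoder for the default naming (meta WALs carry an extra .meta suffix that this sketch ignores; the helper name is hypothetical):

import java.time.Instant;

// Illustrative decoding of the WAL name scheme visible above.
final class WalNames {
  // A rolled WAL is named <URL-encoded server name>.<epoch millis of the roll>.
  static long creationMillis(String walName) {
    return Long.parseLong(walName.substring(walName.lastIndexOf('.') + 1));
  }

  public static void main(String[] args) {
    String wal = "7a6e4bf70377%2C42969%2C1734375405882.1734375514526";
    // Prints 2024-12-16T18:58:34.526Z -- the same instant as the log line above.
    System.out.println(Instant.ofEpochMilli(creationMillis(wal)));
  }
}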
2024-12-16T18:58:34,542 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 73b443c21ee806be6a66ecec93f40711 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-16T18:58:34,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741833_1009 (size=314980)
2024-12-16T18:58:34,546 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/76e5334b9fc74108ad4f10c29f9958cc is 1080, key is row0256/info:/1734375512524/Put/seqid=0
2024-12-16T18:58:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741875_1051 (size=6035)
2024-12-16T18:58:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741875_1051 (size=6035)
2024-12-16T18:58:34,561 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/76e5334b9fc74108ad4f10c29f9958cc
2024-12-16T18:58:34,566 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/.tmp/info/76e5334b9fc74108ad4f10c29f9958cc as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/76e5334b9fc74108ad4f10c29f9958cc
2024-12-16T18:58:34,572 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/76e5334b9fc74108ad4f10c29f9958cc, entries=1, sequenceid=329, filesize=5.9 K
2024-12-16T18:58:34,573 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 73b443c21ee806be6a66ecec93f40711 in 31ms, sequenceid=329, compaction requested=false
2024-12-16T18:58:34,573 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 73b443c21ee806be6a66ecec93f40711:
2024-12-16T18:58:34,573 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 58ec97c1298be15af22bf03642bc8ca9 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-16T18:58:34,592 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9/.tmp/info/5c36b32aa3bc4d91ae3d694146e1c609 is 45, key is default/info:d/1734375407396/Put/seqid=0
2024-12-16T18:58:34,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741876_1052 (size=5037)
2024-12-16T18:58:34,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741876_1052 (size=5037)
2024-12-16T18:58:34,610 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9/.tmp/info/5c36b32aa3bc4d91ae3d694146e1c609
2024-12-16T18:58:34,620 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9/.tmp/info/5c36b32aa3bc4d91ae3d694146e1c609 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9/info/5c36b32aa3bc4d91ae3d694146e1c609
2024-12-16T18:58:34,626 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9/info/5c36b32aa3bc4d91ae3d694146e1c609, entries=2, sequenceid=6, filesize=4.9 K
2024-12-16T18:58:34,627 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 58ec97c1298be15af22bf03642bc8ca9 in 53ms, sequenceid=6, compaction requested=false
2024-12-16T18:58:34,627 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 58ec97c1298be15af22bf03642bc8ca9:
2024-12-16T18:58:34,627 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.21 KB heapSize=4.13 KB
2024-12-16T18:58:34,632 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/info/09e5691a251d40c993d5ef8257e7f9b2 is 193, key is TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711./info:regioninfo/1734375432588/Put/seqid=0
2024-12-16T18:58:34,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741877_1053 (size=7803)
2024-12-16T18:58:34,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741877_1053 (size=7803)
2024-12-16T18:58:34,644 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.21 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/info/09e5691a251d40c993d5ef8257e7f9b2
2024-12-16T18:58:34,648 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/.tmp/info/09e5691a251d40c993d5ef8257e7f9b2 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/info/09e5691a251d40c993d5ef8257e7f9b2
2024-12-16T18:58:34,653 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/info/09e5691a251d40c993d5ef8257e7f9b2, entries=16, sequenceid=24, filesize=7.6 K
2024-12-16T18:58:34,654 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.21 KB/2260, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 27ms, sequenceid=24, compaction requested=false
2024-12-16T18:58:34,654 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740:
2024-12-16T18:58:34,654 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 6c92de9f8fe3ea13408be58d6f2c0c24:
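Every flush above follows the same two-phase commit visible in the paths: the new HFile is written in full under the region's .tmp directory, then renamed into the column family directory (the "Committing ... as ..." lines), so readers never observe a half-written file. A minimal sketch of that pattern against the Hadoop FileSystem API; the helper name and path layout are illustrative, not HBase's exact code:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative two-phase flush commit: write to .tmp, then rename into place.
final class FlushCommit {
  static Path commit(FileSystem fs, Path regionDir, String family, String hfileName)
      throws IOException {
    Path tmp = new Path(regionDir, ".tmp/" + family + "/" + hfileName);
    Path dst = new Path(regionDir, family + "/" + hfileName);
    // The completed HFile moves into the family directory in a single rename,
    // which on HDFS is atomic from a reader's perspective -- this is the
    // "Committing ... .tmp/info/<file> as ... info/<file>" step logged above.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to commit " + tmp + " as " + dst);
    }
    return dst;
  }
}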
2024-12-16T18:58:34,655 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C42969%2C1734375405882.1734375514654
2024-12-16T18:58:34,663 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375514526 with entries=4, filesize=1.22 KB; new WAL /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375514654
2024-12-16T18:58:34,663 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38895:38895),(127.0.0.1/127.0.0.1:41819:41819)]
2024-12-16T18:58:34,664 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375514526 is not closed yet, will try archiving it next time
2024-12-16T18:58:34,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741874_1050 (size=1255)
2024-12-16T18:58:34,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741874_1050 (size=1255)
2024-12-16T18:58:34,666 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375406499 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/oldWALs/7a6e4bf70377%2C42969%2C1734375405882.1734375406499
2024-12-16T18:58:34,666 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-12-16T18:58:34,666 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-16T18:58:34,667 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-16T18:58:34,667 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c391f13 to 127.0.0.1:62808
2024-12-16T18:58:34,667 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:58:34,667 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882/7a6e4bf70377%2C42969%2C1734375405882.1734375514526 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/oldWALs/7a6e4bf70377%2C42969%2C1734375405882.1734375514526
2024-12-16T18:58:34,667 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-16T18:58:34,667 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1929806074, stopped=false
2024-12-16T18:58:34,668 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7a6e4bf70377,34203,1734375405739
2024-12-16T18:58:34,683 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-16T18:58:34,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:58:34,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:58:34,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:34,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:34,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:58:34,684 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:58:34,684 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:58:34,684 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,42969,1734375405882' *****
2024-12-16T18:58:34,684 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(3579): Received CLOSE for 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:58:34,684 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(3579): Received CLOSE for 58ec97c1298be15af22bf03642bc8ca9
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(3579): Received CLOSE for 6c92de9f8fe3ea13408be58d6f2c0c24
2024-12-16T18:58:34,684 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 73b443c21ee806be6a66ecec93f40711, disabling compactions & flushes
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,42969,1734375405882
2024-12-16T18:58:34,684 DEBUG [RS:0;7a6e4bf70377:42969 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:58:34,684 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.
2024-12-16T18:58:34,684 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.
2024-12-16T18:58:34,684 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. after waiting 0 ms
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-16T18:58:34,684 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-16T18:58:34,684 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-16T18:58:34,685 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1599): Waiting on 4 regions to close
2024-12-16T18:58:34,685 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1603): Online Regions={73b443c21ee806be6a66ecec93f40711=TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711., 58ec97c1298be15af22bf03642bc8ca9=hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9., 1588230740=hbase:meta,,1.1588230740, 6c92de9f8fe3ea13408be58d6f2c0c24=TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24.}
2024-12-16T18:58:34,685 DEBUG [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 58ec97c1298be15af22bf03642bc8ca9, 6c92de9f8fe3ea13408be58d6f2c0c24, 73b443c21ee806be6a66ecec93f40711
2024-12-16T18:58:34,685 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:58:34,685 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:58:34,685 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:58:34,685 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:58:34,685 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:58:34,685 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2->hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8-top,
hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-f262ce3eadeb458d93b424eb7d375bd8, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/4656e5b7b396435d936c22c9cd9317e5, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-4011e518faa141118ae139ee5773ec90, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/642e6e8c93e84c13ac3deb92cc05488f, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c4c30efe4ddc4dba8bf5517635ff9990, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/1c39b753bd3a40aa9940e6a49dd447b4, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/332d4291106d486cbd40082519e6e47b, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/2a1ef71e441f447d816625bda9c0a0d2, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8ffb7de35e7740168b6ba8de4d0ca999, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/0d2e9724b31e41baa370274dfadc0ef3, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/f6e4c22fbe0b4e1fa39f7bca5dc22991, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/b7dcae6c915e4affaf1784404db6eea4, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/758ff5eeb9fa4ab5982f7ed9edd772a0, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8f1791fe96004d9886706a86c03ada70, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/fd546bcff1314164942b2c3d3162d135, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/24190ee559f04470a0be0e814b93e8af, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/11aacd3108d74cf1be56837f69fb6402, 
hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/34825c18fc634f93b8d4f281d1a5ff1d, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c2e416b6852e4878ac39b75a41749cc1, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/78e3545f38c24afca2d0ebec4e9b2218] to archive 2024-12-16T18:58:34,687 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T18:58:34,689 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:58:34,689 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-f262ce3eadeb458d93b424eb7d375bd8 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-f262ce3eadeb458d93b424eb7d375bd8 2024-12-16T18:58:34,690 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/4656e5b7b396435d936c22c9cd9317e5 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/4656e5b7b396435d936c22c9cd9317e5 2024-12-16T18:58:34,690 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-4011e518faa141118ae139ee5773ec90 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/TestLogRolling-testLogRolling=fab14af368cb42e95d16937b9023fbb2-4011e518faa141118ae139ee5773ec90 2024-12-16T18:58:34,691 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/642e6e8c93e84c13ac3deb92cc05488f to 
hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/642e6e8c93e84c13ac3deb92cc05488f 2024-12-16T18:58:34,691 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/1c39b753bd3a40aa9940e6a49dd447b4 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/1c39b753bd3a40aa9940e6a49dd447b4 2024-12-16T18:58:34,692 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c4c30efe4ddc4dba8bf5517635ff9990 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c4c30efe4ddc4dba8bf5517635ff9990 2024-12-16T18:58:34,692 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/2a1ef71e441f447d816625bda9c0a0d2 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/2a1ef71e441f447d816625bda9c0a0d2 2024-12-16T18:58:34,693 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8ffb7de35e7740168b6ba8de4d0ca999 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8ffb7de35e7740168b6ba8de4d0ca999 2024-12-16T18:58:34,693 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/b7dcae6c915e4affaf1784404db6eea4 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/b7dcae6c915e4affaf1784404db6eea4 2024-12-16T18:58:34,693 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/332d4291106d486cbd40082519e6e47b to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/332d4291106d486cbd40082519e6e47b 2024-12-16T18:58:34,693 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/758ff5eeb9fa4ab5982f7ed9edd772a0 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/758ff5eeb9fa4ab5982f7ed9edd772a0 2024-12-16T18:58:34,693 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/f6e4c22fbe0b4e1fa39f7bca5dc22991 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/f6e4c22fbe0b4e1fa39f7bca5dc22991 2024-12-16T18:58:34,694 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/0d2e9724b31e41baa370274dfadc0ef3 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/0d2e9724b31e41baa370274dfadc0ef3 2024-12-16T18:58:34,695 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8f1791fe96004d9886706a86c03ada70 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/8f1791fe96004d9886706a86c03ada70 2024-12-16T18:58:34,695 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/meta/1588230740/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-12-16T18:58:34,696 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/fd546bcff1314164942b2c3d3162d135 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/fd546bcff1314164942b2c3d3162d135 2024-12-16T18:58:34,696 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/24190ee559f04470a0be0e814b93e8af to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/24190ee559f04470a0be0e814b93e8af 2024-12-16T18:58:34,696 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/11aacd3108d74cf1be56837f69fb6402 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/11aacd3108d74cf1be56837f69fb6402 2024-12-16T18:58:34,696 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/34825c18fc634f93b8d4f281d1a5ff1d to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/34825c18fc634f93b8d4f281d1a5ff1d 2024-12-16T18:58:34,696 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-16T18:58:34,696 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-16T18:58:34,696 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-16T18:58:34,696 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/78e3545f38c24afca2d0ebec4e9b2218 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/78e3545f38c24afca2d0ebec4e9b2218 2024-12-16T18:58:34,696 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c2e416b6852e4878ac39b75a41749cc1 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/info/c2e416b6852e4878ac39b75a41749cc1 2024-12-16T18:58:34,696 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-16T18:58:34,702 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/73b443c21ee806be6a66ecec93f40711/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=121 2024-12-16T18:58:34,702 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. 
2024-12-16T18:58:34,702 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 73b443c21ee806be6a66ecec93f40711: 2024-12-16T18:58:34,702 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1734375431853.73b443c21ee806be6a66ecec93f40711. 2024-12-16T18:58:34,703 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 58ec97c1298be15af22bf03642bc8ca9, disabling compactions & flushes 2024-12-16T18:58:34,703 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:58:34,703 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:58:34,703 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. after waiting 0 ms 2024-12-16T18:58:34,703 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:58:34,709 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/hbase/namespace/58ec97c1298be15af22bf03642bc8ca9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T18:58:34,710 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:58:34,710 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 58ec97c1298be15af22bf03642bc8ca9: 2024-12-16T18:58:34,710 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734375406946.58ec97c1298be15af22bf03642bc8ca9. 2024-12-16T18:58:34,710 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 6c92de9f8fe3ea13408be58d6f2c0c24, disabling compactions & flushes 2024-12-16T18:58:34,710 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:58:34,710 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:58:34,710 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 
after waiting 0 ms 2024-12-16T18:58:34,710 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:58:34,710 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2->hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/fab14af368cb42e95d16937b9023fbb2/info/d1496b1dc1d345a087f65afc77b7a8f8-bottom] to archive 2024-12-16T18:58:34,711 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-16T18:58:34,713 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2 to hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/archive/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/info/d1496b1dc1d345a087f65afc77b7a8f8.fab14af368cb42e95d16937b9023fbb2 2024-12-16T18:58:34,718 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/data/default/TestLogRolling-testLogRolling/6c92de9f8fe3ea13408be58d6f2c0c24/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=121 2024-12-16T18:58:34,718 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:58:34,718 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 6c92de9f8fe3ea13408be58d6f2c0c24: 2024-12-16T18:58:34,718 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1734375431853.6c92de9f8fe3ea13408be58d6f2c0c24. 2024-12-16T18:58:34,885 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,42969,1734375405882; all regions closed. 
2024-12-16T18:58:34,885 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882 2024-12-16T18:58:34,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741834_1010 (size=9351) 2024-12-16T18:58:34,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741834_1010 (size=9351) 2024-12-16T18:58:34,889 DEBUG [RS:0;7a6e4bf70377:42969 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/oldWALs 2024-12-16T18:58:34,889 INFO [RS:0;7a6e4bf70377:42969 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C42969%2C1734375405882.meta:.meta(num 1734375406834) 2024-12-16T18:58:34,890 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/WALs/7a6e4bf70377,42969,1734375405882 2024-12-16T18:58:34,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741878_1054 (size=1071) 2024-12-16T18:58:34,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741878_1054 (size=1071) 2024-12-16T18:58:34,893 DEBUG [RS:0;7a6e4bf70377:42969 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/oldWALs 2024-12-16T18:58:34,893 INFO [RS:0;7a6e4bf70377:42969 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C42969%2C1734375405882:(num 1734375514654) 2024-12-16T18:58:34,893 DEBUG [RS:0;7a6e4bf70377:42969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T18:58:34,893 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T18:58:34,893 INFO [RS:0;7a6e4bf70377:42969 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-16T18:58:34,894 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-16T18:58:34,894 INFO [RS:0;7a6e4bf70377:42969 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42969 2024-12-16T18:58:34,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T18:58:34,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,42969,1734375405882 2024-12-16T18:58:34,916 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,42969,1734375405882] 2024-12-16T18:58:34,916 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,42969,1734375405882; numProcessing=1 2024-12-16T18:58:34,924 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,42969,1734375405882 already deleted, retry=false 2024-12-16T18:58:34,924 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,42969,1734375405882 expired; onlineServers=0 2024-12-16T18:58:34,924 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,34203,1734375405739' ***** 2024-12-16T18:58:34,924 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-16T18:58:34,925 DEBUG [M:0;7a6e4bf70377:34203 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a49e29a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0 2024-12-16T18:58:34,925 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,34203,1734375405739 2024-12-16T18:58:34,925 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,34203,1734375405739; all regions closed. 2024-12-16T18:58:34,925 DEBUG [M:0;7a6e4bf70377:34203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T18:58:34,925 DEBUG [M:0;7a6e4bf70377:34203 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-16T18:58:34,925 DEBUG [M:0;7a6e4bf70377:34203 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-16T18:58:34,925 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-16T18:58:34,925 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375406210 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375406210,5,FailOnTimeoutGroup] 2024-12-16T18:58:34,925 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375406210 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375406210,5,FailOnTimeoutGroup] 2024-12-16T18:58:34,925 INFO [M:0;7a6e4bf70377:34203 {}] hbase.ChoreService(370): Chore service for: master/7a6e4bf70377:0 had [] on shutdown 2024-12-16T18:58:34,925 DEBUG [M:0;7a6e4bf70377:34203 {}] master.HMaster(1733): Stopping service threads 2024-12-16T18:58:34,925 INFO [M:0;7a6e4bf70377:34203 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-16T18:58:34,925 INFO [M:0;7a6e4bf70377:34203 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-16T18:58:34,926 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-16T18:58:34,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-16T18:58:34,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T18:58:34,933 DEBUG [M:0;7a6e4bf70377:34203 {}] zookeeper.ZKUtil(347): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-16T18:58:34,933 WARN [M:0;7a6e4bf70377:34203 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-16T18:58:34,933 INFO [M:0;7a6e4bf70377:34203 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-16T18:58:34,933 INFO [M:0;7a6e4bf70377:34203 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-16T18:58:34,933 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T18:58:34,933 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T18:58:34,933 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:58:34,933 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:58:34,933 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T18:58:34,933 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-16T18:58:34,934 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=66.45 KB heapSize=81.70 KB 2024-12-16T18:58:34,953 DEBUG [M:0;7a6e4bf70377:34203 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/287987e3353b46d7a46dd78880cf57ea is 82, key is hbase:meta,,1/info:regioninfo/1734375406870/Put/seqid=0 2024-12-16T18:58:34,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741879_1055 (size=5672) 2024-12-16T18:58:34,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741879_1055 (size=5672) 2024-12-16T18:58:34,960 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/287987e3353b46d7a46dd78880cf57ea 2024-12-16T18:58:34,985 DEBUG [M:0;7a6e4bf70377:34203 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3c9434ba89b49e1b3d9ede109e20f40 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734375407922/Put/seqid=0 2024-12-16T18:58:34,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741880_1056 (size=7286) 2024-12-16T18:58:34,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741880_1056 (size=7286) 2024-12-16T18:58:34,998 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.85 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3c9434ba89b49e1b3d9ede109e20f40 2024-12-16T18:58:35,003 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f3c9434ba89b49e1b3d9ede109e20f40 2024-12-16T18:58:35,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:58:35,016 INFO [RS:0;7a6e4bf70377:42969 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,42969,1734375405882; zookeeper connection closed. 
2024-12-16T18:58:35,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42969-0x100e45677550001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:58:35,016 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4d0d8cf4 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4d0d8cf4 2024-12-16T18:58:35,017 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-16T18:58:35,023 DEBUG [M:0;7a6e4bf70377:34203 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de3872c4cae5499e80c4c8e9ddfcc0db is 69, key is 7a6e4bf70377,42969,1734375405882/rs:state/1734375406341/Put/seqid=0 2024-12-16T18:58:35,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741881_1057 (size=5156) 2024-12-16T18:58:35,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741881_1057 (size=5156) 2024-12-16T18:58:35,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:58:35,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:58:35,433 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de3872c4cae5499e80c4c8e9ddfcc0db 2024-12-16T18:58:35,457 DEBUG [M:0;7a6e4bf70377:34203 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a4e290a9d78446b9d24710ffd5a1f82 is 52, key is load_balancer_on/state:d/1734375407523/Put/seqid=0 2024-12-16T18:58:35,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741882_1058 (size=5056) 2024-12-16T18:58:35,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741882_1058 (size=5056) 2024-12-16T18:58:35,470 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a4e290a9d78446b9d24710ffd5a1f82 2024-12-16T18:58:35,476 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/287987e3353b46d7a46dd78880cf57ea as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/287987e3353b46d7a46dd78880cf57ea 2024-12-16T18:58:35,481 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/287987e3353b46d7a46dd78880cf57ea, entries=8, sequenceid=164, filesize=5.5 K 2024-12-16T18:58:35,482 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3c9434ba89b49e1b3d9ede109e20f40 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f3c9434ba89b49e1b3d9ede109e20f40 2024-12-16T18:58:35,488 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f3c9434ba89b49e1b3d9ede109e20f40 2024-12-16T18:58:35,488 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f3c9434ba89b49e1b3d9ede109e20f40, entries=18, sequenceid=164, filesize=7.1 K 2024-12-16T18:58:35,490 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/de3872c4cae5499e80c4c8e9ddfcc0db as 
hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/de3872c4cae5499e80c4c8e9ddfcc0db 2024-12-16T18:58:35,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-16T18:58:35,497 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/de3872c4cae5499e80c4c8e9ddfcc0db, entries=1, sequenceid=164, filesize=5.0 K 2024-12-16T18:58:35,499 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a4e290a9d78446b9d24710ffd5a1f82 as hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1a4e290a9d78446b9d24710ffd5a1f82 2024-12-16T18:58:35,506 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32987/user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1a4e290a9d78446b9d24710ffd5a1f82, entries=1, sequenceid=164, filesize=4.9 K 2024-12-16T18:58:35,507 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(3040): Finished flush of dataSize ~66.45 KB/68043, heapSize ~81.63 KB/83592, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 574ms, sequenceid=164, compaction requested=false 2024-12-16T18:58:35,518 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T18:58:35,519 DEBUG [M:0;7a6e4bf70377:34203 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T18:58:35,522 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2d726915-1c45-252b-efe0-1c1717cc6c4a/MasterData/WALs/7a6e4bf70377,34203,1734375405739 2024-12-16T18:58:35,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741830_1006 (size=79272) 2024-12-16T18:58:35,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36229 is added to blk_1073741830_1006 (size=79272) 2024-12-16T18:58:35,528 INFO [M:0;7a6e4bf70377:34203 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-16T18:58:35,528 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-16T18:58:35,528 INFO [M:0;7a6e4bf70377:34203 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34203 2024-12-16T18:58:35,538 DEBUG [M:0;7a6e4bf70377:34203 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7a6e4bf70377,34203,1734375405739 already deleted, retry=false 2024-12-16T18:58:35,647 INFO [M:0;7a6e4bf70377:34203 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,34203,1734375405739; zookeeper connection closed. 
2024-12-16T18:58:35,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:58:35,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34203-0x100e45677550000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T18:58:35,655 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@202ada42{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T18:58:35,656 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@379c0c1f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T18:58:35,656 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T18:58:35,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5963885b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T18:58:35,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@499b8a67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.log.dir/,STOPPED} 2024-12-16T18:58:35,659 WARN [BP-606476777-172.17.0.2-1734375403429 heartbeating to localhost/127.0.0.1:32987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T18:58:35,659 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-16T18:58:35,659 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:58:35,659 WARN [BP-606476777-172.17.0.2-1734375403429 heartbeating to localhost/127.0.0.1:32987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-606476777-172.17.0.2-1734375403429 (Datanode Uuid 6af1cc1c-3469-418d-b44a-b91b4334daae) service to localhost/127.0.0.1:32987
2024-12-16T18:58:35,660 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/dfs/data/data3/current/BP-606476777-172.17.0.2-1734375403429 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:58:35,661 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:58:35,661 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/dfs/data/data4/current/BP-606476777-172.17.0.2-1734375403429 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:58:35,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26ac97a6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:58:35,674 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f52bd0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:58:35,674 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:58:35,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ab6d217{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:58:35,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@436616ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.log.dir/,STOPPED}
2024-12-16T18:58:35,678 WARN [BP-606476777-172.17.0.2-1734375403429 heartbeating to localhost/127.0.0.1:32987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:58:35,678 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:58:35,678 WARN [BP-606476777-172.17.0.2-1734375403429 heartbeating to localhost/127.0.0.1:32987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-606476777-172.17.0.2-1734375403429 (Datanode Uuid 21f77a1b-dc99-4c51-addf-0571c39b983e) service to localhost/127.0.0.1:32987
2024-12-16T18:58:35,678 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:58:35,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/dfs/data/data2/current/BP-606476777-172.17.0.2-1734375403429 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:58:35,680 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:58:35,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/cluster_3fb46f58-e3a5-71a0-610a-fbec802569e1/dfs/data/data1/current/BP-606476777-172.17.0.2-1734375403429 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:58:35,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63986ce6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T18:58:35,711 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29aa261{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:58:35,712 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:58:35,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13ef0a6a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:58:35,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@650dbc9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.log.dir/,STOPPED}
2024-12-16T18:58:35,743 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-16T18:58:35,787 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-16T18:58:35,801 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=131 (was 112) - Thread LEAK? -, OpenFileDescriptor=490 (was 466) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=442 (was 266) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4219 (was 3861) - AvailableMemoryMB LEAK? -
2024-12-16T18:58:35,812 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=132, OpenFileDescriptor=490, MaxFileDescriptor=1048576, SystemLoadAverage=442, ProcessCount=11, AvailableMemoryMB=4217
2024-12-16T18:58:35,812 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-16T18:58:35,812 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.log.dir so I do NOT create it in target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207
2024-12-16T18:58:35,812 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/77ddf768-ad53-2a81-c849-7b3ff275ddcc/hadoop.tmp.dir so I do NOT create it in target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207
2024-12-16T18:58:35,812 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b, deleteOnExit=true
2024-12-16T18:58:35,812 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-16T18:58:35,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/test.cache.data in system properties and HBase conf
2024-12-16T18:58:35,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/hadoop.tmp.dir in system properties and HBase conf
2024-12-16T18:58:35,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/hadoop.log.dir in system properties and HBase conf
2024-12-16T18:58:35,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-16T18:58:35,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-16T18:58:35,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-16T18:58:35,813 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:58:35,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-16T18:58:35,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/nfs.dump.dir in system properties and HBase conf
2024-12-16T18:58:35,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/java.io.tmpdir in system properties and HBase conf
2024-12-16T18:58:35,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T18:58:35,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-16T18:58:35,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-16T18:58:35,830 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-16T18:58:36,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:36,362 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:58:36,366 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:58:36,366 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:58:36,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:58:36,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:58:36,368 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T18:58:36,369 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:58:36,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a3af2d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:58:36,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d3d25ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:58:36,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:36,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:36,495 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5575d521{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/java.io.tmpdir/jetty-localhost-39759-hadoop-hdfs-3_4_1-tests_jar-_-any-16027002796447748847/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T18:58:36,495 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10765324{HTTP/1.1, (http/1.1)}{localhost:39759}
2024-12-16T18:58:36,496 INFO [Time-limited test {}] server.Server(415): Started @420862ms
2024-12-16T18:58:36,510 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-16T18:58:36,943 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:58:36,946 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:58:36,947 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:58:36,947 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:58:36,947 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:58:36,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b36d04a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:58:36,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36e6628c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:58:37,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@523bfc81{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/java.io.tmpdir/jetty-localhost-35281-hadoop-hdfs-3_4_1-tests_jar-_-any-3128986065029851024/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:58:37,064 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1328adb7{HTTP/1.1, (http/1.1)}{localhost:35281}
2024-12-16T18:58:37,064 INFO [Time-limited test {}] server.Server(415): Started @421430ms
2024-12-16T18:58:37,065 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:58:37,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T18:58:37,121 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T18:58:37,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T18:58:37,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T18:58:37,125 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T18:58:37,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1055425{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/hadoop.log.dir/,AVAILABLE}
2024-12-16T18:58:37,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58a001ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T18:58:37,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:37,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a328bc8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/java.io.tmpdir/jetty-localhost-39503-hadoop-hdfs-3_4_1-tests_jar-_-any-2927994331496882643/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:58:37,251 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@421d8f5d{HTTP/1.1, (http/1.1)}{localhost:39503}
2024-12-16T18:58:37,252 INFO [Time-limited test {}] server.Server(415): Started @421618ms
2024-12-16T18:58:37,253 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T18:58:37,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:37,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:38,118 WARN [Thread-2255 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/dfs/data/data1/current/BP-1294099631-172.17.0.2-1734375515853/current, will proceed with Du for space computation calculation,
2024-12-16T18:58:38,119 WARN [Thread-2256 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/dfs/data/data2/current/BP-1294099631-172.17.0.2-1734375515853/current, will proceed with Du for space computation calculation,
2024-12-16T18:58:38,136 WARN [Thread-2219 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:58:38,139 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x36f75b44bdde7bc4 with lease ID 0x462e20937a2c815b: Processing first storage report for DS-f9cfcaff-efc6-4278-a5c4-747cb2c61bcb from datanode DatanodeRegistration(127.0.0.1:42947, datanodeUuid=dbb34cf5-c96c-4001-b83b-44451337dff3, infoPort=39029, infoSecurePort=0, ipcPort=38599, storageInfo=lv=-57;cid=testClusterID;nsid=1063564816;c=1734375515853)
2024-12-16T18:58:38,139 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x36f75b44bdde7bc4 with lease ID 0x462e20937a2c815b: from storage DS-f9cfcaff-efc6-4278-a5c4-747cb2c61bcb node DatanodeRegistration(127.0.0.1:42947, datanodeUuid=dbb34cf5-c96c-4001-b83b-44451337dff3, infoPort=39029, infoSecurePort=0, ipcPort=38599, storageInfo=lv=-57;cid=testClusterID;nsid=1063564816;c=1734375515853), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:58:38,139 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x36f75b44bdde7bc4 with lease ID 0x462e20937a2c815b: Processing first storage report for DS-9319c360-f7c4-452b-b294-121472462405 from datanode DatanodeRegistration(127.0.0.1:42947, datanodeUuid=dbb34cf5-c96c-4001-b83b-44451337dff3, infoPort=39029, infoSecurePort=0, ipcPort=38599, storageInfo=lv=-57;cid=testClusterID;nsid=1063564816;c=1734375515853)
2024-12-16T18:58:38,139 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x36f75b44bdde7bc4 with lease ID 0x462e20937a2c815b: from storage DS-9319c360-f7c4-452b-b294-121472462405 node DatanodeRegistration(127.0.0.1:42947, datanodeUuid=dbb34cf5-c96c-4001-b83b-44451337dff3, infoPort=39029, infoSecurePort=0, ipcPort=38599, storageInfo=lv=-57;cid=testClusterID;nsid=1063564816;c=1734375515853), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:58:38,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:38,227 WARN [Thread-2266 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/dfs/data/data3/current/BP-1294099631-172.17.0.2-1734375515853/current, will proceed with Du for space computation calculation,
2024-12-16T18:58:38,227 WARN [Thread-2267 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/dfs/data/data4/current/BP-1294099631-172.17.0.2-1734375515853/current, will proceed with Du for space computation calculation,
2024-12-16T18:58:38,245 WARN [Thread-2242 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T18:58:38,247 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x40accafe17bb3f59 with lease ID 0x462e20937a2c815c: Processing first storage report for DS-4f8dfab8-1d39-4c42-8ec9-53e3abe57d01 from datanode DatanodeRegistration(127.0.0.1:40345, datanodeUuid=ea053108-57e1-4001-81d9-9222ccbdc23c, infoPort=37659, infoSecurePort=0, ipcPort=40331, storageInfo=lv=-57;cid=testClusterID;nsid=1063564816;c=1734375515853)
2024-12-16T18:58:38,247 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x40accafe17bb3f59 with lease ID 0x462e20937a2c815c: from storage DS-4f8dfab8-1d39-4c42-8ec9-53e3abe57d01 node DatanodeRegistration(127.0.0.1:40345, datanodeUuid=ea053108-57e1-4001-81d9-9222ccbdc23c, infoPort=37659, infoSecurePort=0, ipcPort=40331, storageInfo=lv=-57;cid=testClusterID;nsid=1063564816;c=1734375515853), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:58:38,247 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x40accafe17bb3f59 with lease ID 0x462e20937a2c815c: Processing first storage report for DS-a10a3209-f56c-418a-a2c0-678062389e49 from datanode DatanodeRegistration(127.0.0.1:40345, datanodeUuid=ea053108-57e1-4001-81d9-9222ccbdc23c, infoPort=37659, infoSecurePort=0, ipcPort=40331, storageInfo=lv=-57;cid=testClusterID;nsid=1063564816;c=1734375515853)
2024-12-16T18:58:38,247 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x40accafe17bb3f59 with lease ID 0x462e20937a2c815c: from storage DS-a10a3209-f56c-418a-a2c0-678062389e49 node DatanodeRegistration(127.0.0.1:40345, datanodeUuid=ea053108-57e1-4001-81d9-9222ccbdc23c, infoPort=37659, infoSecurePort=0, ipcPort=40331, storageInfo=lv=-57;cid=testClusterID;nsid=1063564816;c=1734375515853), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T18:58:38,307 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207
2024-12-16T18:58:38,310 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/zookeeper_0, clientPort=55441, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-16T18:58:38,311 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55441
2024-12-16T18:58:38,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:58:38,313 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:58:38,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:58:38,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741825_1001 (size=7)
2024-12-16T18:58:38,326 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95 with version=8
2024-12-16T18:58:38,326 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:33663/user/jenkins/test-data/8e375712-1955-b312-7a6f-f1d86acc76cc/hbase-staging
2024-12-16T18:58:38,327 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:58:38,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:58:38,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:58:38,328 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:58:38,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:58:38,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:58:38,328 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:58:38,328 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:58:38,328 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35453
2024-12-16T18:58:38,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:58:38,330 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:58:38,332 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35453 connecting to ZooKeeper ensemble=127.0.0.1:55441
2024-12-16T18:58:38,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:38,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:354530x0, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:58:38,444 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35453-0x100e4582f1f0000 connected
2024-12-16T18:58:38,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:38,525 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:58:38,525 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:58:38,526 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:58:38,530 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35453
2024-12-16T18:58:38,530 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35453
2024-12-16T18:58:38,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35453
2024-12-16T18:58:38,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35453
2024-12-16T18:58:38,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35453
2024-12-16T18:58:38,539 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95, hbase.cluster.distributed=false
2024-12-16T18:58:38,559 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T18:58:38,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:58:38,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T18:58:38,559 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T18:58:38,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T18:58:38,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T18:58:38,559 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T18:58:38,559 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T18:58:38,560 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44933
2024-12-16T18:58:38,561 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T18:58:38,562 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-16T18:58:38,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:58:38,564 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:58:38,566 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44933 connecting to ZooKeeper ensemble=127.0.0.1:55441
2024-12-16T18:58:38,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:449330x0, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T18:58:38,575 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:449330x0, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:58:38,575 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44933-0x100e4582f1f0001 connected
2024-12-16T18:58:38,575 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:58:38,576 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T18:58:38,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44933
2024-12-16T18:58:38,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44933
2024-12-16T18:58:38,583 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44933
2024-12-16T18:58:38,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44933
2024-12-16T18:58:38,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44933
2024-12-16T18:58:38,589 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:38,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:58:38,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:58:38,600 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:38,606 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a6e4bf70377:35453
2024-12-16T18:58:38,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:58:38,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-16T18:58:38,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,608 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:58:38,608 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a6e4bf70377,35453,1734375518327 from backup master directory
2024-12-16T18:58:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:58:38,616 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-16T18:58:38,616 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
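The ZKUtil(113) entries above ("Set watcher on znode that does not yet exist") use a standard ZooKeeper idiom: exists() both answers "is the znode there?" and arms a one-shot watch, so a watch can be placed on /hbase/master before any master has created it. A hedged sketch with the plain org.apache.zookeeper client (the quorum string is the one from this log; error handling is elided):

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch of watching a znode that may not exist yet.
public class WatchAbsentZnode {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55441", 30000, event -> {});
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());
    // Returns null if /hbase/master is absent, but the watch is still armed:
    // a later NodeCreated event (as seen in the log) fires the watcher once.
    if (zk.exists("/hbase/master", watcher) == null) {
      System.out.println("znode not yet present; watch set");
    }
  }
}
```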
2024-12-16T18:58:38,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-16T18:58:38,616 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:38,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:58:38,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741826_1002 (size=42)
2024-12-16T18:58:38,626 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/hbase.id with ID: ee169570-c860-4458-ac26-9596c1d46eac
2024-12-16T18:58:38,635 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:58:38,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:58:38,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741827_1003 (size=196)
2024-12-16T18:58:38,654 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-16T18:58:38,654 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-16T18:58:38,655 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:58:38,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:58:38,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741828_1004 (size=1189)
2024-12-16T18:58:38,662 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store
2024-12-16T18:58:38,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:58:38,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741829_1005 (size=34)
2024-12-16T18:58:38,668 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:58:38,668 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:58:38,668 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:58:38,668 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:58:38,668 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:58:38,668 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:58:38,668 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:58:38,668 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:58:38,668 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/.initializing
2024-12-16T18:58:38,669 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/WALs/7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:38,671 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C35453%2C1734375518327, suffix=, logDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/WALs/7a6e4bf70377,35453,1734375518327, archiveDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/oldWALs, maxLogs=10
2024-12-16T18:58:38,671 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C35453%2C1734375518327.1734375518671
2024-12-16T18:58:38,676 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/WALs/7a6e4bf70377,35453,1734375518327/7a6e4bf70377%2C35453%2C1734375518327.1734375518671
2024-12-16T18:58:38,676 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39029:39029),(127.0.0.1/127.0.0.1:37659:37659)]
2024-12-16T18:58:38,676 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:58:38,676 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:58:38,676 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,676 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,679 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-16T18:58:38,679 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:38,679 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:58:38,679 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-16T18:58:38,680 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:38,681 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:58:38,681 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-16T18:58:38,682 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:38,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:58:38,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-16T18:58:38,683 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:38,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:58:38,684 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,684 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,686 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-16T18:58:38,687 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-16T18:58:38,688 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:58:38,689 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802766, jitterRate=0.020770296454429626}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-16T18:58:38,690 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:58:38,690 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-16T18:58:38,694 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@121b02f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:58:38,695 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-16T18:58:38,695 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-16T18:58:38,695 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-16T18:58:38,696 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-16T18:58:38,696 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-16T18:58:38,697 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-16T18:58:38,697 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-16T18:58:38,700 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
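The CompactionConfiguration lines repeated above for each column family quote a ratio of 1.200000 with minFilesToCompact:3 and maxFilesToCompact:10. The core idea behind such ratio-based selection is that a candidate set compacts only when no single file dwarfs the rest. The sketch below paraphrases that rule; it is illustrative, not HBase's ExploringCompactionPolicy verbatim:

```java
import java.util.List;

// Sketch of a ratio-based compaction eligibility test.
public class RatioCompactionSketch {
  /** A candidate set qualifies only if every file is <= ratio * (sum of the others). */
  static boolean satisfiesRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false; // one file dominates; compacting would mostly rewrite it
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three similar files pass; a 100x outlier fails the 1.2 ratio.
    System.out.println(satisfiesRatio(List.of(10L, 12L, 11L), 1.2));   // true
    System.out.println(satisfiesRatio(List.of(1000L, 12L, 11L), 1.2)); // false
  }
}
```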
2024-12-16T18:58:38,701 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-16T18:58:38,714 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-16T18:58:38,714 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-16T18:58:38,715 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-16T18:58:38,724 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-16T18:58:38,724 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-16T18:58:38,725 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-16T18:58:38,732 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-16T18:58:38,733 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-16T18:58:38,741 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-16T18:58:38,743 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-16T18:58:38,749 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-16T18:58:38,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:58:38,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-16T18:58:38,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,758 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7a6e4bf70377,35453,1734375518327, sessionid=0x100e4582f1f0000, setting cluster-up flag (Was=false)
2024-12-16T18:58:38,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,799 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-16T18:58:38,800 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:38,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:38,841 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-16T18:58:38,841 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-16T18:58:38,844 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-16T18:58:38,844 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
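The StochasticLoadBalancer entry above lists its cost functions and notes "sum of multiplier of cost functions = 0.0". The underlying idea, sketched below under the assumption that each cost is normalized to [0, 1] and weighted by a per-function multiplier, is a weighted average; a multiplier sum of 0.0 means no function currently contributes. This is a paraphrase of the concept, not the balancer's actual code:

```java
// Sketch of weighted cost aggregation for a load balancer.
public class WeightedCostSketch {
  interface CostFunction {
    double multiplier();
    double cost(); // assumed normalized to [0, 1]
  }

  static double totalCost(CostFunction[] functions) {
    double weighted = 0.0, multipliers = 0.0;
    for (CostFunction f : functions) {
      weighted += f.multiplier() * f.cost();
      multipliers += f.multiplier();
    }
    // With a multiplier sum of 0.0 (as logged), there is nothing to optimize.
    return multipliers == 0.0 ? 0.0 : weighted / multipliers;
  }
}
```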
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a6e4bf70377,35453,1734375518327 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a6e4bf70377:0, corePoolSize=10, maxPoolSize=10
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:58:38,844 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,845 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:58:38,845 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-16T18:58:38,846 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734375548846
2024-12-16T18:58:38,846 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-16T18:58:38,846 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-16T18:58:38,846 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-16T18:58:38,846 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-16T18:58:38,846 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-16T18:58:38,846 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-16T18:58:38,847 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,847 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-16T18:58:38,847 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-16T18:58:38,847 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-16T18:58:38,850 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-16T18:58:38,850 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-16T18:58:38,850 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375518850,5,FailOnTimeoutGroup]
2024-12-16T18:58:38,854 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375518850,5,FailOnTimeoutGroup]
2024-12-16T18:58:38,854 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,854 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-16T18:58:38,854 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,854 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
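The ChoreService entries above each register a named task with a fixed period (LogsCleaner and HFileCleaner every 600000 ms, SnapshotCleaner every 1800000 ms). A minimal stand-in using plain java.util.concurrent follows; the name and period are taken from the log, but the scheduling code is a sketch, not HBase's ChoreService:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch: a periodic cleaner chore on a fixed schedule.
public class CleanerChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    Runnable logsCleaner = () -> System.out.println("scanning oldWALs for expired logs");
    // period=600000, unit=MILLISECONDS, as logged for LogsCleaner.
    chores.scheduleAtFixedRate(logsCleaner, 600_000, 600_000, TimeUnit.MILLISECONDS);
  }
}
```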
2024-12-16T18:58:38,858 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:38,858 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:58:38,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:58:38,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741831_1007 (size=1039)
2024-12-16T18:58:38,905 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a6e4bf70377:44933
2024-12-16T18:58:38,906 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1008): ClusterId : ee169570-c860-4458-ac26-9596c1d46eac
2024-12-16T18:58:38,906 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-16T18:58:38,917 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-16T18:58:38,917 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-16T18:58:38,933 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-16T18:58:38,934 DEBUG [RS:0;7a6e4bf70377:44933 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@282967e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:58:38,934 DEBUG [RS:0;7a6e4bf70377:44933 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@375ecef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:58:38,935 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-16T18:58:38,935 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-16T18:58:38,935 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-16T18:58:38,935 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,35453,1734375518327 with isa=7a6e4bf70377/172.17.0.2:44933, startcode=1734375518558
2024-12-16T18:58:38,936 DEBUG [RS:0;7a6e4bf70377:44933 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-16T18:58:38,938 INFO [RS-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54091, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService
2024-12-16T18:58:38,938 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35453 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:38,938 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35453 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:38,941 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95
2024-12-16T18:58:38,941 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34465
2024-12-16T18:58:38,941 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-16T18:58:38,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:58:38,949 DEBUG [RS:0;7a6e4bf70377:44933 {}] zookeeper.ZKUtil(111): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:38,949 WARN [RS:0;7a6e4bf70377:44933 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
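The registration handshake above ends with the region server owning a znode under /hbase/rs. The usual mechanism, sketched here with the plain ZooKeeper client, is an ephemeral znode: ZooKeeper deletes it automatically if the server's session dies, which is what lets the master detect crashed servers. The server name and quorum string are the ones from this log; the code itself is illustrative:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Sketch: register a server by creating an ephemeral znode under /hbase/rs.
public class EphemeralRegistrationSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55441", 30000, event -> {});
    String serverName = "7a6e4bf70377,44933,1734375518558";
    // EPHEMERAL: the znode lives only as long as this session does.
    zk.create("/hbase/rs/" + serverName, new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Watchers on /hbase/rs then see NodeChildrenChanged, matching the
    // "RegionServer ephemeral node created" entry that follows in the log.
  }
}
```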
2024-12-16T18:58:38,950 INFO [RS:0;7a6e4bf70377:44933 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:58:38,950 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:38,950 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,44933,1734375518558]
2024-12-16T18:58:38,956 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-16T18:58:38,956 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-16T18:58:38,957 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-16T18:58:38,958 INFO [RS:0;7a6e4bf70377:44933 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-16T18:58:38,958 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,962 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-16T18:58:38,963 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
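Both WAL configuration lines in this log pair blocksize=256 MB with rollsize=128 MB, i.e. the roll threshold is half the HDFS block size, and each new WAL file is named prefix.timestamp (e.g. 7a6e4bf70377%2C44933%2C1734375518558.1734375519097). The sketch below captures that arithmetic; the 0.5 multiplier is an assumption inferred from the two logged values (it mirrors what hbase.regionserver.logroll.multiplier conventionally controls), not something this log states directly:

```java
// Sketch of WAL sizing and naming, under the assumptions noted above.
public class WalRollSketch {
  static long rollSize(long blockSizeBytes, double multiplier) {
    return (long) (blockSizeBytes * multiplier); // 256 MB * 0.5 = 128 MB
  }

  static String nextWalName(String prefix, long nowMillis) {
    return prefix + "." + nowMillis; // timestamped so names sort by creation time
  }

  static boolean shouldRoll(long bytesWrittenToCurrentWal, long rollSize) {
    return bytesWrittenToCurrentWal >= rollSize; // roll once the current WAL fills
  }
}
```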
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1
2024-12-16T18:58:38,963 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:58:38,964 DEBUG [RS:0;7a6e4bf70377:44933 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3
2024-12-16T18:58:38,964 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,964 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,964 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,964 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,964 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,44933,1734375518558-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
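The executor.ExecutorService entries above start one small, named pool per event type (RS_OPEN_REGION, RS_CLOSE_REGION, RS_FLUSH_OPERATIONS, ...), so a burst of one kind of work cannot starve the others. A plain java.util.concurrent sketch of that construction follows; the pool names and sizes come from the log, while the helper itself is invented for illustration:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch: one named thread pool per operation type.
public class NamedExecutorSketch {
  static ThreadPoolExecutor newExecutor(String name, int corePoolSize, int maxPoolSize) {
    AtomicInteger seq = new AtomicInteger();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        corePoolSize, maxPoolSize, 60, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-" + seq.getAndIncrement())); // named threads
    pool.allowCoreThreadTimeOut(true); // idle handlers may exit
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = newExecutor("RS_OPEN_REGION", 1, 1);
    openRegion.execute(() -> System.out.println("opening region..."));
    openRegion.shutdown();
  }
}
```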
2024-12-16T18:58:38,981 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-16T18:58:38,981 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,44933,1734375518558-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:38,994 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.Replication(204): 7a6e4bf70377,44933,1734375518558 started
2024-12-16T18:58:38,994 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,44933,1734375518558, RpcServer on 7a6e4bf70377/172.17.0.2:44933, sessionid=0x100e4582f1f0001
2024-12-16T18:58:38,994 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-16T18:58:38,994 DEBUG [RS:0;7a6e4bf70377:44933 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:38,994 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,44933,1734375518558'
2024-12-16T18:58:38,994 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-16T18:58:38,994 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-16T18:58:38,995 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-16T18:58:38,995 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-16T18:58:38,995 DEBUG [RS:0;7a6e4bf70377:44933 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:38,995 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,44933,1734375518558'
2024-12-16T18:58:38,995 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-16T18:58:38,995 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-16T18:58:38,995 DEBUG [RS:0;7a6e4bf70377:44933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-16T18:58:38,995 INFO [RS:0;7a6e4bf70377:44933 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-16T18:58:38,995 INFO [RS:0;7a6e4bf70377:44933 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
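The ZKProcedureMemberRpcs entries above show the startup scan of the per-procedure znodes: the member lists the 'acquired' and 'abort' children so it can join procedures that began while it was offline. A hedged sketch of that scan with the plain ZooKeeper client follows (paths and quorum from the log; the loop is illustrative, not HBase's member code):

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

// Sketch: scan the procedure coordination znodes at member startup.
public class ProcedureMemberScanSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55441", 30000, event -> {});
    // 'true' re-arms the default watcher, so new children trigger a re-scan.
    List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", true);
    for (String proc : acquired) {
      System.out.println("found in-flight procedure: " + proc);
    }
    List<String> aborted = zk.getChildren("/hbase/flush-table-proc/abort", true);
    System.out.println("aborted procedures: " + aborted);
  }
}
```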
2024-12-16T18:58:39,097 INFO [RS:0;7a6e4bf70377:44933 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C44933%2C1734375518558, suffix=, logDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/7a6e4bf70377,44933,1734375518558, archiveDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/oldWALs, maxLogs=32
2024-12-16T18:58:39,098 INFO [RS:0;7a6e4bf70377:44933 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C44933%2C1734375518558.1734375519097
2024-12-16T18:58:39,114 INFO [RS:0;7a6e4bf70377:44933 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/7a6e4bf70377,44933,1734375518558/7a6e4bf70377%2C44933%2C1734375518558.1734375519097
2024-12-16T18:58:39,114 DEBUG [RS:0;7a6e4bf70377:44933 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37659:37659),(127.0.0.1/127.0.0.1:39029:39029)]
2024-12-16T18:58:39,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:39,289 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-16T18:58:39,289 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95
2024-12-16T18:58:39,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741833_1009 (size=32)
2024-12-16T18:58:39,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741833_1009 (size=32)
2024-12-16T18:58:39,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:39,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:39,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:58:39,702 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:58:39,703 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:58:39,703 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:39,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:58:39,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:58:39,705 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-16T18:58:39,705 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:58:39,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T18:58:39,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-16T18:58:39,707 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-16T18:58:39,708 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T18:58:39,708 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T18:58:39,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740 2024-12-16T18:58:39,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740 2024-12-16T18:58:39,711 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
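The recurring "Failed invocation" warnings above come from RecoverLeaseFSUtils probing, via reflection, whether an old WAL file is already closed; the probe fails here because the paths point at the earlier mini-cluster on localhost:35231, whose DFSClient has already been shut down, hence the "Filesystem closed" cause. A minimal, hypothetical sketch of that reflective probe pattern (not the actual HBase source; class and method names below are illustrative, assuming only the generic Hadoop FileSystem API on the classpath):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  /** Returns true if HDFS reports the file closed; false if the probe could not tell. */
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem but not on the
      // generic FileSystem API, hence the reflective lookup seen in the trace.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (InvocationTargetException e) {
      // An IOException("Filesystem closed") thrown by an already shut-down
      // DFSClient surfaces here, matching the warnings in this log.
      return false;
    } catch (ReflectiveOperationException e) {
      return false; // method absent on this FileSystem implementation
    }
  }
}

The reflective call is the reason the warning wraps the real failure in InvocationTargetException: the underlying IOException is only visible as the trace's "Caused by" section.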
2024-12-16T18:58:39,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:58:39,714 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:58:39,715 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695387, jitterRate=-0.1157701164484024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:58:39,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:58:39,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:58:39,715 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:58:39,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:58:39,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:58:39,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:58:39,715 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:58:39,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:58:39,716 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-16T18:58:39,716 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-16T18:58:39,717 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-16T18:58:39,718 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-16T18:58:39,720 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-16T18:58:39,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:39,870 DEBUG [7a6e4bf70377:35453 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-16T18:58:39,871 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:39,872 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,44933,1734375518558, state=OPENING
2024-12-16T18:58:39,889 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-16T18:58:39,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:39,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:39,900 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:58:39,900 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:58:39,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7a6e4bf70377,44933,1734375518558}]
2024-12-16T18:58:40,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:40,052 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-16T18:58:40,054 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-16T18:58:40,057 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-16T18:58:40,058 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:58:40,060 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C44933%2C1734375518558.meta, suffix=.meta, logDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/7a6e4bf70377,44933,1734375518558, archiveDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/oldWALs, maxLogs=32
2024-12-16T18:58:40,060 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7a6e4bf70377%2C44933%2C1734375518558.meta.1734375520060.meta
2024-12-16T18:58:40,075 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/7a6e4bf70377,44933,1734375518558/7a6e4bf70377%2C44933%2C1734375518558.meta.1734375520060.meta
2024-12-16T18:58:40,076 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39029:39029),(127.0.0.1/127.0.0.1:37659:37659)]
2024-12-16T18:58:40,076 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:58:40,076 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-16T18:58:40,076 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-16T18:58:40,076 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-16T18:58:40,076 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-16T18:58:40,076 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:58:40,076 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-16T18:58:40,076 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-16T18:58:40,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-16T18:58:40,079 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-16T18:58:40,079 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:40,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:58:40,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-16T18:58:40,080 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-16T18:58:40,080 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:40,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:58:40,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-16T18:58:40,081 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-16T18:58:40,081 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:40,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-16T18:58:40,083 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740
2024-12-16T18:58:40,084 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740
2024-12-16T18:58:40,085 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-16T18:58:40,086 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-16T18:58:40,087 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710077, jitterRate=-0.0970904678106308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-16T18:58:40,088 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-16T18:58:40,089 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734375520051
2024-12-16T18:58:40,090 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-16T18:58:40,091 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-16T18:58:40,091 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:40,091 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,44933,1734375518558, state=OPEN
2024-12-16T18:58:40,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:58:40,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-16T18:58:40,137 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:58:40,137 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-16T18:58:40,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-16T18:58:40,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7a6e4bf70377,44933,1734375518558 in 237 msec
2024-12-16T18:58:40,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:58:40,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-16T18:58:40,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 425 msec
2024-12-16T18:58:40,148 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.3040 sec
2024-12-16T18:58:40,149 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734375520148, completionTime=-1
2024-12-16T18:58:40,149 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-16T18:58:40,149 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
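The ZKWatcher entries above show ZooKeeper's standard watch-and-reread cycle: a NodeDataChanged event fires for /hbase/meta-region-server, each watcher re-reads the znode, and, because ZooKeeper watches are one-shot, re-arms its watch. A minimal standalone sketch of that cycle with the plain ZooKeeper client; the connect string mirrors the quorum shown in the log, while the timeout and printout are illustrative assumptions:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class MetaLocationWatch {
  public static void main(String[] args) throws Exception {
    // Session-level watcher is a no-op here; we only care about the data watch.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55441", 30_000, event -> {});
    Watcher w = new Watcher() {
      @Override public void process(WatchedEvent e) {
        if (e.getType() == Event.EventType.NodeDataChanged) {
          try {
            // Re-read the znode and pass "this" to re-arm the one-shot watch,
            // mirroring the cache-refresh pattern in the log above.
            byte[] data = zk.getData("/hbase/meta-region-server", this, null);
            System.out.println("meta location updated: "
                + (data == null ? 0 : data.length) + " bytes");
          } catch (Exception ex) {
            ex.printStackTrace();
          }
        }
      }
    };
    zk.getData("/hbase/meta-region-server", w, null); // arm the initial watch
    Thread.sleep(Long.MAX_VALUE); // keep the process alive to receive events
  }
}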
2024-12-16T18:58:40,149 DEBUG [hconnection-0x295120a-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:58:40,151 INFO [RS-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55790, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:58:40,152 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-16T18:58:40,152 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734375580152
2024-12-16T18:58:40,152 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734375640152
2024-12-16T18:58:40,152 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec
2024-12-16T18:58:40,175 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35453,1734375518327-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:40,175 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35453,1734375518327-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:40,175 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35453,1734375518327-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:40,175 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a6e4bf70377:35453, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:40,175 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:40,175 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-16T18:58:40,175 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-16T18:58:40,176 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-16T18:58:40,177 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-16T18:58:40,177 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-16T18:58:40,178 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:40,179 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-16T18:58:40,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:58:40,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741835_1011 (size=358)
2024-12-16T18:58:40,201 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 90a6b4e01b695b788fef0ab1ad82f814, NAME => 'hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95
2024-12-16T18:58:40,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:58:40,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741836_1012 (size=42)
2024-12-16T18:58:40,243 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-16T18:58:40,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-16T18:58:40,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:58:40,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-16T18:58:40,618 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:58:40,618 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 90a6b4e01b695b788fef0ab1ad82f814, disabling compactions & flushes
2024-12-16T18:58:40,618 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:40,618 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:40,618 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814. after waiting 0 ms
2024-12-16T18:58:40,618 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:40,618 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:40,618 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 90a6b4e01b695b788fef0ab1ad82f814:
2024-12-16T18:58:40,619 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-16T18:58:40,619 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734375520619"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734375520619"}]},"ts":"1734375520619"}
2024-12-16T18:58:40,621 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
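The create 'hbase:namespace' entry above spells out the column-family attributes the master applies (VERSIONS => '10', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', BLOCKSIZE => '8192 B'). For comparison, a hedged sketch of how a client would request an equivalent family on a hypothetical user table with the HBase 2.x builder API; the table name "demo" and the connection setup are illustrative, not taken from this run:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateDemoTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Mirrors the family attributes printed in the create log line above.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(10)               // VERSIONS => '10'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setInMemory(true)                 // IN_MEMORY => 'true'
              .setBlocksize(8192)                // BLOCKSIZE => '8192 B (8KB)'
              .build());
      admin.createTable(td.build());
    }
  }
}

On the server side such a request becomes exactly the CreateTableProcedure state machine visible in this log: PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION.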
2024-12-16T18:58:40,622 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-16T18:58:40,622 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375520622"}]},"ts":"1734375520622"}
2024-12-16T18:58:40,624 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-16T18:58:40,641 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=90a6b4e01b695b788fef0ab1ad82f814, ASSIGN}]
2024-12-16T18:58:40,643 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=90a6b4e01b695b788fef0ab1ad82f814, ASSIGN
2024-12-16T18:58:40,644 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=90a6b4e01b695b788fef0ab1ad82f814, ASSIGN; state=OFFLINE, location=7a6e4bf70377,44933,1734375518558; forceNewPlan=false, retain=false
2024-12-16T18:58:40,795 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=90a6b4e01b695b788fef0ab1ad82f814, regionState=OPENING, regionLocation=7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:40,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 90a6b4e01b695b788fef0ab1ad82f814, server=7a6e4bf70377,44933,1734375518558}]
2024-12-16T18:58:40,949 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:40,952 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:40,952 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 90a6b4e01b695b788fef0ab1ad82f814, NAME => 'hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.', STARTKEY => '', ENDKEY => ''}
2024-12-16T18:58:40,953 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:40,953 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-16T18:58:40,953 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:40,953 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:40,954 INFO [StoreOpener-90a6b4e01b695b788fef0ab1ad82f814-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:40,955 INFO [StoreOpener-90a6b4e01b695b788fef0ab1ad82f814-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 90a6b4e01b695b788fef0ab1ad82f814 columnFamilyName info
2024-12-16T18:58:40,955 DEBUG [StoreOpener-90a6b4e01b695b788fef0ab1ad82f814-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-16T18:58:40,956 INFO [StoreOpener-90a6b4e01b695b788fef0ab1ad82f814-1 {}] regionserver.HStore(327): Store=90a6b4e01b695b788fef0ab1ad82f814/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-16T18:58:40,957 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:40,957 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:40,959 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:40,961 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-16T18:58:40,962 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 90a6b4e01b695b788fef0ab1ad82f814; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845080, jitterRate=0.07457511126995087}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-16T18:58:40,963 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 90a6b4e01b695b788fef0ab1ad82f814:
2024-12-16T18:58:40,963 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814., pid=6, masterSystemTime=1734375520949
2024-12-16T18:58:40,965 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:40,965 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:40,965 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=90a6b4e01b695b788fef0ab1ad82f814, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:40,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-16T18:58:40,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 90a6b4e01b695b788fef0ab1ad82f814, server=7a6e4bf70377,44933,1734375518558 in 171 msec
2024-12-16T18:58:40,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-16T18:58:40,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=90a6b4e01b695b788fef0ab1ad82f814, ASSIGN in 327 msec
2024-12-16T18:58:40,970 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-16T18:58:40,970 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734375520970"}]},"ts":"1734375520970"}
2024-12-16T18:58:40,971 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-16T18:58:40,981 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-16T18:58:40,981 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-16T18:58:40,983 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 806 msec
2024-12-16T18:58:40,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:40,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:58:40,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:40,994 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-16T18:58:41,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:58:41,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 24 msec
2024-12-16T18:58:41,027 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-16T18:58:41,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-16T18:58:41,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 28 msec
2024-12-16T18:58:41,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-16T18:58:41,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-16T18:58:41,099 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.483sec
2024-12-16T18:58:41,100 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-16T18:58:41,100 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-16T18:58:41,100 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-16T18:58:41,100 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-16T18:58:41,100 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-16T18:58:41,100 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35453,1734375518327-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-16T18:58:41,100 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35453,1734375518327-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-16T18:58:41,106 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-16T18:58:41,106 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-16T18:58:41,106 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,35453,1734375518327-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-16T18:58:41,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:41,198 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b8ccc28 to 127.0.0.1:55441 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6402def4
2024-12-16T18:58:41,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50c1ef8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-16T18:58:41,211 DEBUG [hconnection-0x702a03a9-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-16T18:58:41,213 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-16T18:58:41,216 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:41,216 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T18:58:41,219 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-16T18:58:41,219 INFO [Time-limited test {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-16T18:58:41,222 INFO [Time-limited test {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/test.com,8080,1, archiveDir=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/oldWALs, maxLogs=32
2024-12-16T18:58:41,223 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1734375521223
2024-12-16T18:58:41,230 INFO [Time-limited test {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/test.com,8080,1/test.com%2C8080%2C1.1734375521223
2024-12-16T18:58:41,230 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39029:39029),(127.0.0.1/127.0.0.1:37659:37659)]
2024-12-16T18:58:41,231 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1734375521230
2024-12-16T18:58:41,237 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/test.com,8080,1/test.com%2C8080%2C1.1734375521223 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/test.com,8080,1/test.com%2C8080%2C1.1734375521230
2024-12-16T18:58:41,237 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39029:39029),(127.0.0.1/127.0.0.1:37659:37659)]
2024-12-16T18:58:41,237 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/test.com,8080,1/test.com%2C8080%2C1.1734375521223 is not closed yet, will try archiving it next time
2024-12-16T18:58:41,238 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/test.com,8080,1
2024-12-16T18:58:41,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741837_1013 (size=93)
2024-12-16T18:58:41,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741837_1013 (size=93)
2024-12-16T18:58:41,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741838_1014 (size=93)
2024-12-16T18:58:41,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741838_1014 (size=93)
2024-12-16T18:58:41,240 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/test.com,8080,1/test.com%2C8080%2C1.1734375521223 to hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/oldWALs/test.com%2C8080%2C1.1734375521223
2024-12-16T18:58:41,242 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/oldWALs
2024-12-16T18:58:41,242 INFO [Time-limited test {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1734375521230)
2024-12-16T18:58:41,242 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-16T18:58:41,242 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b8ccc28 to 127.0.0.1:55441
2024-12-16T18:58:41,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:58:41,242 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-16T18:58:41,242 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=434928912, stopped=false
2024-12-16T18:58:41,242 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:41,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:58:41,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:41,258 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-16T18:58:41,258 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:58:41,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-16T18:58:41,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:41,259 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:58:41,259 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,44933,1734375518558' *****
2024-12-16T18:58:41,259 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-16T18:58:41,259 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-16T18:58:41,259 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T18:58:41,259 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-16T18:58:41,259 INFO [RS:0;7a6e4bf70377:44933 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-16T18:58:41,259 INFO [RS:0;7a6e4bf70377:44933 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-16T18:58:41,259 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(3579): Received CLOSE for 90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:41,259 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:41,259 DEBUG [RS:0;7a6e4bf70377:44933 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:58:41,260 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-16T18:58:41,260 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-16T18:58:41,260 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-16T18:58:41,260 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 90a6b4e01b695b788fef0ab1ad82f814, disabling compactions & flushes
2024-12-16T18:58:41,260 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-16T18:58:41,260 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:41,260 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:41,260 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814. after waiting 0 ms
2024-12-16T18:58:41,260 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:41,260 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close
2024-12-16T18:58:41,260 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 90a6b4e01b695b788fef0ab1ad82f814 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-16T18:58:41,260 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 90a6b4e01b695b788fef0ab1ad82f814=hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.}
2024-12-16T18:58:41,260 DEBUG [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 90a6b4e01b695b788fef0ab1ad82f814
2024-12-16T18:58:41,260 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-16T18:58:41,260 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-16T18:58:41,260 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-16T18:58:41,260 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-16T18:58:41,260 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-16T18:58:41,260 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=1.23 KB heapSize=2.87 KB
2024-12-16T18:58:41,274 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/.tmp/info/576d77593c034569962f025ada9f7d59 is 143, key is hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814./info:regioninfo/1734375520965/Put/seqid=0
2024-12-16T18:58:41,274 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814/.tmp/info/509e74ee497747dd8e52d597e4f7f56c is 45, key is default/info:d/1734375520997/Put/seqid=0
2024-12-16T18:58:41,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741840_1016 (size=6595)
2024-12-16T18:58:41,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741840_1016 (size=6595)
2024-12-16T18:58:41,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741839_1015 (size=5037)
2024-12-16T18:58:41,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741839_1015 (size=5037)
2024-12-16T18:58:41,280 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.14 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/.tmp/info/576d77593c034569962f025ada9f7d59
2024-12-16T18:58:41,280 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814/.tmp/info/509e74ee497747dd8e52d597e4f7f56c
2024-12-16T18:58:41,286 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814/.tmp/info/509e74ee497747dd8e52d597e4f7f56c as hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814/info/509e74ee497747dd8e52d597e4f7f56c
2024-12-16T18:58:41,291 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814/info/509e74ee497747dd8e52d597e4f7f56c, entries=2, sequenceid=6, filesize=4.9 K
2024-12-16T18:58:41,292 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 90a6b4e01b695b788fef0ab1ad82f814 in 32ms, sequenceid=6, compaction requested=false
2024-12-16T18:58:41,292 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-16T18:58:41,297 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/namespace/90a6b4e01b695b788fef0ab1ad82f814/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-16T18:58:41,298 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:41,298 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 90a6b4e01b695b788fef0ab1ad82f814:
2024-12-16T18:58:41,298 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734375520175.90a6b4e01b695b788fef0ab1ad82f814.
2024-12-16T18:58:41,300 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/.tmp/table/dce9aaff18f749649ad162772edc7518 is 51, key is hbase:namespace/table:state/1734375520970/Put/seqid=0
2024-12-16T18:58:41,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741841_1017 (size=5242)
2024-12-16T18:58:41,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741841_1017 (size=5242)
2024-12-16T18:58:41,305 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=94 B at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/.tmp/table/dce9aaff18f749649ad162772edc7518
2024-12-16T18:58:41,310 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/.tmp/info/576d77593c034569962f025ada9f7d59 as hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/info/576d77593c034569962f025ada9f7d59
2024-12-16T18:58:41,316 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/info/576d77593c034569962f025ada9f7d59, entries=10, sequenceid=9, filesize=6.4 K
2024-12-16T18:58:41,317 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/.tmp/table/dce9aaff18f749649ad162772edc7518 as hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/table/dce9aaff18f749649ad162772edc7518
2024-12-16T18:58:41,325 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/table/dce9aaff18f749649ad162772edc7518, entries=2, sequenceid=9, filesize=5.1 K
2024-12-16T18:58:41,326 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~1.23 KB/1264, heapSize ~2.59 KB/2648, currentSize=0 B/0 for 1588230740 in 66ms, sequenceid=9, compaction requested=false
2024-12-16T18:58:41,326 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-16T18:58:41,341 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/data/hbase/meta/1588230740/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1
2024-12-16T18:58:41,342 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-16T18:58:41,342 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-16T18:58:41,342 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-16T18:58:41,342 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-16T18:58:41,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.meta.1734375219669.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:41,460 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,44933,1734375518558; all regions closed.
2024-12-16T18:58:41,460 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:41,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741834_1010 (size=2484)
2024-12-16T18:58:41,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741834_1010 (size=2484)
2024-12-16T18:58:41,469 DEBUG [RS:0;7a6e4bf70377:44933 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/oldWALs
2024-12-16T18:58:41,469 INFO [RS:0;7a6e4bf70377:44933 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C44933%2C1734375518558.meta:.meta(num 1734375520060)
2024-12-16T18:58:41,469 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/WALs/7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:41,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741832_1008 (size=1414)
2024-12-16T18:58:41,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741832_1008 (size=1414)
2024-12-16T18:58:41,480 DEBUG [RS:0;7a6e4bf70377:44933 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/oldWALs
2024-12-16T18:58:41,480 INFO [RS:0;7a6e4bf70377:44933 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7a6e4bf70377%2C44933%2C1734375518558:(num 1734375519097)
2024-12-16T18:58:41,480 DEBUG [RS:0;7a6e4bf70377:44933 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:58:41,480 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.LeaseManager(133): Closed leases
2024-12-16T18:58:41,481 INFO [RS:0;7a6e4bf70377:44933 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-16T18:58:41,481 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:58:41,481 INFO [RS:0;7a6e4bf70377:44933 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44933
2024-12-16T18:58:41,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,44933,1734375518558
2024-12-16T18:58:41,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-16T18:58:41,491 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,44933,1734375518558]
2024-12-16T18:58:41,492 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,44933,1734375518558; numProcessing=1
2024-12-16T18:58:41,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/WALs/7a6e4bf70377,45905,1734375218213/7a6e4bf70377%2C45905%2C1734375218213.1734375219221
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:41,507 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,44933,1734375518558 already deleted, retry=false
2024-12-16T18:58:41,508 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,44933,1734375518558 expired; onlineServers=0
2024-12-16T18:58:41,508 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,35453,1734375518327' *****
2024-12-16T18:58:41,508 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-16T18:58:41,508 DEBUG [M:0;7a6e4bf70377:35453 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@583fbfc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0
2024-12-16T18:58:41,508 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:41,508 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,35453,1734375518327; all regions closed.
2024-12-16T18:58:41,508 DEBUG [M:0;7a6e4bf70377:35453 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-16T18:58:41,508 DEBUG [M:0;7a6e4bf70377:35453 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-16T18:58:41,508 DEBUG [M:0;7a6e4bf70377:35453 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-16T18:58:41,508 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-16T18:58:41,508 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375518850 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734375518850,5,FailOnTimeoutGroup]
2024-12-16T18:58:41,508 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375518850 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734375518850,5,FailOnTimeoutGroup]
2024-12-16T18:58:41,508 INFO [M:0;7a6e4bf70377:35453 {}] hbase.ChoreService(370): Chore service for: master/7a6e4bf70377:0 had [] on shutdown
2024-12-16T18:58:41,509 DEBUG [M:0;7a6e4bf70377:35453 {}] master.HMaster(1733): Stopping service threads
2024-12-16T18:58:41,509 INFO [M:0;7a6e4bf70377:35453 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-16T18:58:41,509 INFO [M:0;7a6e4bf70377:35453 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-16T18:58:41,509 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-16T18:58:41,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-16T18:58:41,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-16T18:58:41,524 DEBUG [M:0;7a6e4bf70377:35453 {}] zookeeper.ZKUtil(347): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-16T18:58:41,524 WARN [M:0;7a6e4bf70377:35453 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-16T18:58:41,524 INFO [M:0;7a6e4bf70377:35453 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-16T18:58:41,525 INFO [M:0;7a6e4bf70377:35453 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-16T18:58:41,525 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-16T18:58:41,525 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:58:41,525 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T18:58:41,525 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:58:41,525 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-16T18:58:41,525 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:58:41,525 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=25.32 KB heapSize=32.31 KB
2024-12-16T18:58:41,549 DEBUG [M:0;7a6e4bf70377:35453 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d33d711de10a417f86ef7e323297fe8c is 82, key is hbase:meta,,1/info:regioninfo/1734375520091/Put/seqid=0
2024-12-16T18:58:41,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741842_1018 (size=5672)
2024-12-16T18:58:41,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741842_1018 (size=5672)
2024-12-16T18:58:41,594 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d33d711de10a417f86ef7e323297fe8c
2024-12-16T18:58:41,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:58:41,599 INFO [RS:0;7a6e4bf70377:44933 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,44933,1734375518558; zookeeper connection closed.
2024-12-16T18:58:41,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44933-0x100e4582f1f0001, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:58:41,602 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@61f29ea {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@61f29ea
2024-12-16T18:58:41,602 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-16T18:58:41,621 DEBUG [M:0;7a6e4bf70377:35453 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c604cd0be86c4993a77916fafb09848d is 696, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1734375520982/Put/seqid=0
2024-12-16T18:58:41,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741843_1019 (size=6626)
2024-12-16T18:58:41,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741843_1019 (size=6626)
2024-12-16T18:58:42,032 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.72 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c604cd0be86c4993a77916fafb09848d
2024-12-16T18:58:42,052 DEBUG [M:0;7a6e4bf70377:35453 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/962b6e7b6bfe49d790ad08fff3cdbca9 is 69, key is 7a6e4bf70377,44933,1734375518558/rs:state/1734375518938/Put/seqid=0
2024-12-16T18:58:42,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741844_1020 (size=5156)
2024-12-16T18:58:42,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741844_1020 (size=5156)
2024-12-16T18:58:42,058 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/962b6e7b6bfe49d790ad08fff3cdbca9
2024-12-16T18:58:42,078 DEBUG [M:0;7a6e4bf70377:35453 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e291a418483f4ae590cc73ca60489119 is 52, key is load_balancer_on/state:d/1734375521218/Put/seqid=0
2024-12-16T18:58:42,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741845_1021 (size=5056)
2024-12-16T18:58:42,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741845_1021 (size=5056)
2024-12-16T18:58:42,089 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e291a418483f4ae590cc73ca60489119
2024-12-16T18:58:42,094 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d33d711de10a417f86ef7e323297fe8c as hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d33d711de10a417f86ef7e323297fe8c
2024-12-16T18:58:42,099 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d33d711de10a417f86ef7e323297fe8c, entries=8, sequenceid=70, filesize=5.5 K
2024-12-16T18:58:42,101 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c604cd0be86c4993a77916fafb09848d as hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c604cd0be86c4993a77916fafb09848d
2024-12-16T18:58:42,106 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c604cd0be86c4993a77916fafb09848d, entries=8, sequenceid=70, filesize=6.5 K
2024-12-16T18:58:42,107 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/962b6e7b6bfe49d790ad08fff3cdbca9 as hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/962b6e7b6bfe49d790ad08fff3cdbca9
2024-12-16T18:58:42,116 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/962b6e7b6bfe49d790ad08fff3cdbca9, entries=1, sequenceid=70, filesize=5.0 K
2024-12-16T18:58:42,117 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e291a418483f4ae590cc73ca60489119 as hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e291a418483f4ae590cc73ca60489119
2024-12-16T18:58:42,122 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34465/user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e291a418483f4ae590cc73ca60489119, entries=1, sequenceid=70, filesize=4.9 K
2024-12-16T18:58:42,123 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(3040): Finished flush of dataSize ~25.32 KB/25929, heapSize ~32.25 KB/33024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 598ms, sequenceid=70, compaction requested=false
2024-12-16T18:58:42,124 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-16T18:58:42,124 DEBUG [M:0;7a6e4bf70377:35453 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T18:58:42,124 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8840ed86-6cb8-cbba-72ab-2a3e69334f95/MasterData/WALs/7a6e4bf70377,35453,1734375518327
2024-12-16T18:58:42,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40345 is added to blk_1073741830_1006 (size=31030)
2024-12-16T18:58:42,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42947 is added to blk_1073741830_1006 (size=31030)
2024-12-16T18:58:42,126 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-16T18:58:42,126 INFO [M:0;7a6e4bf70377:35453 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-16T18:58:42,126 INFO [M:0;7a6e4bf70377:35453 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35453
2024-12-16T18:58:42,132 DEBUG [M:0;7a6e4bf70377:35453 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7a6e4bf70377,35453,1734375518327 already deleted, retry=false
2024-12-16T18:58:42,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35231/user/jenkins/test-data/0643b3af-c9e0-aceb-ae20-47f26f1bfd20/MasterData/WALs/7a6e4bf70377,36083,1734375217984/7a6e4bf70377%2C36083%2C1734375217984.1734375218574
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor108.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-16T18:58:42,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:58:42,241 INFO [M:0;7a6e4bf70377:35453 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,35453,1734375518327; zookeeper connection closed.
2024-12-16T18:58:42,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35453-0x100e4582f1f0000, quorum=127.0.0.1:55441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-16T18:58:42,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a328bc8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:58:42,244 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@421d8f5d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:58:42,244 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:58:42,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58a001ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:58:42,245 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1055425{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/hadoop.log.dir/,STOPPED}
2024-12-16T18:58:42,246 WARN [BP-1294099631-172.17.0.2-1734375515853 heartbeating to localhost/127.0.0.1:34465 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:58:42,246 WARN [BP-1294099631-172.17.0.2-1734375515853 heartbeating to localhost/127.0.0.1:34465 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1294099631-172.17.0.2-1734375515853 (Datanode Uuid ea053108-57e1-4001-81d9-9222ccbdc23c) service to localhost/127.0.0.1:34465
2024-12-16T18:58:42,246 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:58:42,246 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:58:42,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/dfs/data/data3/current/BP-1294099631-172.17.0.2-1734375515853 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:58:42,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/dfs/data/data4/current/BP-1294099631-172.17.0.2-1734375515853 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:58:42,247 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:58:42,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@523bfc81{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T18:58:42,250 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1328adb7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:58:42,250 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:58:42,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36e6628c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:58:42,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b36d04a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/hadoop.log.dir/,STOPPED}
2024-12-16T18:58:42,251 WARN [BP-1294099631-172.17.0.2-1734375515853 heartbeating to localhost/127.0.0.1:34465 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-16T18:58:42,252 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-16T18:58:42,252 WARN [BP-1294099631-172.17.0.2-1734375515853 heartbeating to localhost/127.0.0.1:34465 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1294099631-172.17.0.2-1734375515853 (Datanode Uuid dbb34cf5-c96c-4001-b83b-44451337dff3) service to localhost/127.0.0.1:34465
2024-12-16T18:58:42,252 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-16T18:58:42,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/dfs/data/data1/current/BP-1294099631-172.17.0.2-1734375515853 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:58:42,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/cluster_0f93094a-8559-9b27-343f-b957f4ad0f0b/dfs/data/data2/current/BP-1294099631-172.17.0.2-1734375515853 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-16T18:58:42,253 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-16T18:58:42,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5575d521{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T18:58:42,259 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10765324{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-16T18:58:42,259 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-16T18:58:42,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d3d25ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-16T18:58:42,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a3af2d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c40a8fc2-f2fc-3674-0559-2f195a31b207/hadoop.log.dir/,STOPPED}
2024-12-16T18:58:42,264 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-16T18:58:42,281 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-16T18:58:42,287 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=153 (was 132) - Thread LEAK? -, OpenFileDescriptor=514 (was 490) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=464 (was 442) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3788 (was 4217)